- Move the check for addr from get_unshared_area() and get_shared_area() into arch_get_unmapped_area().
- Pass inode->i_mapping instead of inode to get_shared_area().
- Call get_shared_area() always, not just when i_mmap_shared is non-empty.
- Pull offset calculation out into get_offset().
- Write a replacement get_offset() (idea by viro).

Index: arch/parisc/kernel/sys_parisc.c
===================================================================
RCS file: /var/cvs/linux-2.6/arch/parisc/kernel/sys_parisc.c,v
retrieving revision 1.6
diff -u -p -r1.6 sys_parisc.c
--- arch/parisc/kernel/sys_parisc.c	12 Aug 2003 19:18:13 -0000	1.6
+++ arch/parisc/kernel/sys_parisc.c	19 Aug 2003 19:40:43 -0000
@@ -30,8 +30,6 @@ static unsigned long get_unshared_area(u
 {
 	struct vm_area_struct *vma;
 
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
 	addr = PAGE_ALIGN(addr);
 
 	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
@@ -46,17 +44,38 @@ static unsigned long get_unshared_area(u
 
 #define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
 
-static unsigned long get_shared_area(struct inode *inode, unsigned long addr,
-		unsigned long len, unsigned long pgoff)
+/*
+ * We need to know the offset to use. Old scheme was to look for
+ * existing mapping and use the same offset. New scheme is to use the
+ * address of the kernel data structure as the seed for the offset.
+ * We'll see how that works...
+ */
+#if 0
+static int get_offset(struct address_space *mapping)
+{
+	struct vm_area_struct *vma = list_entry(mapping->i_mmap_shared.next,
+			struct vm_area_struct, shared);
+	return (vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT)) &
+		(SHMLBA - 1);
+}
+#else
+/* The mapping is cacheline aligned, so there's no information in the bottom
+ * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
+ * drop the bottom 8 bits and use bits 8-17.
+ */
+static int get_offset(struct address_space *mapping)
 {
-	struct vm_area_struct *vma, *first_vma;
-	int offset;
+	int offset = (int) mapping << (PAGE_SHIFT - 8);
+	return offset & 0x3FF000;
+}
+#endif
 
-	first_vma = list_entry(inode->i_mapping->i_mmap_shared.next, struct vm_area_struct, shared);
-	offset = (first_vma->vm_start + ((pgoff - first_vma->vm_pgoff) << PAGE_SHIFT)) & (SHMLBA - 1);
+static unsigned long get_shared_area(struct address_space *mapping,
+		unsigned long addr, unsigned long len, unsigned long pgoff)
+{
+	struct vm_area_struct *vma;
+	int offset = get_offset(mapping);
 
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
 	addr = DCACHE_ALIGN(addr - offset) + offset;
 
 	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
@@ -74,17 +93,17 @@ static unsigned long get_shared_area(str
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	struct inode *inode = NULL;
+	struct inode *inode;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
 
-	if (filp) {
-		inode = filp->f_dentry->d_inode;
-	}
+	inode = filp ? filp->f_dentry->d_inode : NULL;
 
-	if (inode && (flags & MAP_SHARED) && (!list_empty(&inode->i_mapping->i_mmap_shared))) {
-		addr = get_shared_area(inode, addr, len, pgoff);
+	if (inode && (flags & MAP_SHARED)) {
+		addr = get_shared_area(inode->i_mapping, addr, len, pgoff);
 	} else {
 		addr = get_unshared_area(addr, len);
 	}
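
Not part of the patch, just an illustration of the arithmetic in the new get_offset(): with PAGE_SHIFT = 12 and the 0x3FF000 mask, the function takes bits 8-17 of the struct address_space pointer and shifts them up into bits 12-21, so each file gets a page-aligned offset below 4MB derived from its own mapping address. A minimal user-space sketch, with made-up pointer values and unsigned arithmetic so the demo stays well-defined outside the kernel:

	#include <stdio.h>

	#define PAGE_SHIFT	12

	/* Same bit selection as the new get_offset(): drop the bottom 8 bits of
	 * the pointer (the structure is cacheline aligned, so they carry no
	 * information) and move bits 8-17 into bits 12-21 of the offset. */
	static unsigned int get_offset(unsigned long mapping)
	{
		unsigned int offset = (unsigned int) mapping << (PAGE_SHIFT - 8);
		return offset & 0x3FF000;
	}

	int main(void)
	{
		/* Hypothetical addresses of two struct address_space objects. */
		printf("%#x\n", get_offset(0x10234500));	/* prints 0x345000 */
		printf("%#x\n", get_offset(0x10234f80));	/* prints 0x34f000 */
		return 0;
	}

Every result is a multiple of PAGE_SIZE and less than 4MB, and all shared mappings of one file use the same address_space and hence the same offset, so DCACHE_ALIGN(addr - offset) + offset keeps them at a consistent cache colour without having to walk i_mmap_shared for an existing mapping.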