diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 27c4e2ac39a9..937c6ee1786f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -414,9 +414,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			if (next >= end)
 				break;
 
-			hash = hugetlb_fault_mutex_hash(h, current->mm,
-							&pseudo_vma,
-							mapping, next, 0);
+			hash = hugetlb_fault_mutex_hash(h, mapping, next, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			lock_page(page);
@@ -569,7 +567,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	struct address_space *mapping = inode->i_mapping;
 	struct hstate *h = hstate_inode(inode);
 	struct vm_area_struct pseudo_vma;
-	struct mm_struct *mm = current->mm;
 	loff_t hpage_size = huge_page_size(h);
 	unsigned long hpage_shift = huge_page_shift(h);
 	pgoff_t start, index, end;
@@ -633,8 +630,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		addr = index * hpage_size;
 
 		/* mutex taken here, fault path and hole punch */
-		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
-						index, addr);
+		hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		/* See if already present in mapping to avoid alloc/free */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3957d99e66ea..cc185525a94b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -91,9 +91,7 @@ void putback_active_hugepage(struct page *page);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
 extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-				struct vm_area_struct *vma,
-				struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 				pgoff_t idx, unsigned long address);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 324b2953e57e..d7f65a8c629b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3703,21 +3703,14 @@ backout_unlocked:
 }
 
 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			    struct vm_area_struct *vma,
-			    struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
 {
 	unsigned long key[2];
 	u32 hash;
 
-	if (vma->vm_flags & VM_SHARED) {
-		key[0] = (unsigned long) mapping;
-		key[1] = idx;
-	} else {
-		key[0] = (unsigned long) mm;
-		key[1] = address >> huge_page_shift(h);
-	}
+	key[0] = (unsigned long) mapping;
+	key[1] = idx;
 
 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
 
@@ -3728,9 +3721,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  * For uniprocesor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			    struct vm_area_struct *vma,
-			    struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
 {
 	return 0;
@@ -3776,7 +3767,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
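
For illustration, a minimal userspace sketch of the hashing scheme the patch
settles on. This is not kernel code: toy_hash32() and NUM_FAULT_MUTEXES are
hypothetical stand-ins for the kernel's jhash2() and its mutex-table size
(assumed here to be a power of two), and struct address_space is treated as
an opaque pointer used only for its identity.

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	#define NUM_FAULT_MUTEXES 256	/* assumed power-of-two table size */

	struct address_space;		/* opaque; pointer identity only */

	/* Toy FNV-1a style mixer, standing in for the kernel's jhash2(). */
	static uint32_t toy_hash32(const uint32_t *k, size_t nwords)
	{
		uint32_t h = 2166136261u;

		for (size_t i = 0; i < nwords; i++) {
			h ^= k[i];
			h *= 16777619u;
		}
		return h;
	}

	/*
	 * As in the patch, the key is always (mapping, idx), never
	 * (mm, address): every path that touches a given page of a
	 * given file computes the same hash and so serializes on the
	 * same mutex slot.
	 */
	static uint32_t fault_mutex_hash(const struct address_space *mapping,
					 unsigned long idx)
	{
		unsigned long key[2] = { (unsigned long)mapping, idx };
		uint32_t hash = toy_hash32((uint32_t *)key,
					   sizeof(key) / sizeof(uint32_t));

		return hash & (NUM_FAULT_MUTEXES - 1);
	}

	int main(void)
	{
		const struct address_space *mapping =
				(const struct address_space *)0x1000;

		/* Same (mapping, idx) always selects the same slot. */
		printf("%u\n", fault_mutex_hash(mapping, 42));
		printf("%u\n", fault_mutex_hash(mapping, 42));
		return 0;
	}

The point of keying only on (mapping, idx) is visible in the diff itself:
hole punch and truncation previously had to fabricate a pseudo_vma and pass
current->mm just to reach the VM_SHARED branch, while a fault on a private
mapping hashed on (mm, address) and could land on a different mutex than the
hole-punch path operating on the same page. With the single key, both paths
agree on which mutex guards a given page.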