mm/hugetlb: fix a typos in comments
[akpm@linux-foundation.org: coding style fixes]
Signed-off-by: Ethon Paul <ethp@qq.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Link: http://lkml.kernel.org/r/20200410163714.14085-1-ethp@qq.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7c8de35889 (parent b4f315b40d)
mm/hugetlb.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -85,7 +85,7 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 	spin_unlock(&spool->lock);
 
 	/* If no pages are used, and no other handles to the subpool
-	 * remain, give up any reservations mased on minimum size and
+	 * remain, give up any reservations based on minimum size and
 	 * free the subpool */
 	if (free) {
 		if (spool->min_hpages != -1)
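For context on the rule this comment states: once the last handle to a subpool goes away and no pages are in use, any minimum-size reservation is returned to the global pool and the subpool is freed. A minimal user-space sketch of that teardown pattern follows; the struct fields echo the kernel's hugepage_subpool, but give_back_reservation() and the scaffolding are illustrative stand-ins, not kernel API:

#include <stdlib.h>

struct subpool {
	long count;		/* open handles to the subpool */
	long used_hpages;	/* huge pages currently in use */
	long min_hpages;	/* minimum reservation, -1 if none */
};

static long global_reserved;	/* stand-in for the global pool counter */

static void give_back_reservation(long pages)
{
	global_reserved -= pages;	/* stands in for hugetlb_acct_memory() */
}

static void release_if_unused(struct subpool *spool)
{
	/* No pages used and no other handles remain: give up any
	 * minimum-size reservation and free the subpool. */
	if (spool->count == 0 && spool->used_hpages == 0) {
		if (spool->min_hpages != -1)
			give_back_reservation(spool->min_hpages);
		free(spool);
	}
}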
@@ -133,7 +133,7 @@ void hugepage_put_subpool(struct hugepage_subpool *spool)
  * the request. Otherwise, return the number of pages by which the
  * global pools must be adjusted (upward). The returned value may
  * only be different than the passed value (delta) in the case where
- * a subpool minimum size must be manitained.
+ * a subpool minimum size must be maintained.
  */
 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 				       long delta)
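The contract described here is worth spelling out: the caller asks for delta pages, and the return value is how much the global pools must actually be charged, which only shrinks below delta when part of the request is already covered by the subpool's minimum-size reservation. A hedged user-space sketch of that accounting (get_pages_sketch() is a hypothetical name, not the kernel function):

/* Returns the upward adjustment the global pools need for a request of
 * delta pages; rsv_hpages tracks pages already reserved on the
 * subpool's behalf.  Only when that reservation is tapped does the
 * return value differ from delta. */
static long get_pages_sketch(long *rsv_hpages, long delta)
{
	if (delta > *rsv_hpages) {
		long ret = delta - *rsv_hpages;	/* uncovered portion */
		*rsv_hpages = 0;
		return ret;
	}
	*rsv_hpages -= delta;	/* fully covered by existing reserves */
	return 0;
}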
@@ -473,7 +473,7 @@ out_of_memory:
  *
  * Return the number of new huge pages added to the map. This number is greater
  * than or equal to zero. If file_region entries needed to be allocated for
- * this operation and we were not able to allocate, it ruturns -ENOMEM.
+ * this operation and we were not able to allocate, it returns -ENOMEM.
  * region_add of regions of length 1 never allocate file_regions and cannot
  * fail; region_chg will always allocate at least 1 entry and a region_add for
  * 1 page will only require at most 1 entry.
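The guarantee being documented (region_chg may allocate and can fail; the paired region_add for a single page then consumes the preallocated entry and cannot fail) is a prepare/commit split. A generic sketch of the pattern with hypothetical names, not the kernel's file_region code:

#include <stdlib.h>

struct region { long from, to; struct region *next; };

/* Prepare phase: the only step that allocates, hence the only one that
 * can fail (maps to region_chg returning -ENOMEM upstream). */
static struct region *chg_prepare(void)
{
	return malloc(sizeof(struct region));
}

/* Commit phase: consumes the preallocated entry, so it cannot fail
 * (maps to region_add for a 1-page region never allocating). */
static void add_commit(struct region **head, struct region *cache,
		       long from, long to)
{
	cache->from = from;
	cache->to = to;
	cache->next = *head;
	*head = cache;
}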
@@ -988,7 +988,7 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 		 * We know VM_NORESERVE is not set. Therefore, there SHOULD
 		 * be a region map for all pages. The only situation where
 		 * there is no region map is if a hole was punched via
-		 * fallocate. In this case, there really are no reverves to
+		 * fallocate. In this case, there really are no reserves to
 		 * use. This situation is indicated if chg != 0.
 		 */
 		if (chg)
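Put as code, the rule this comment states is small but subtle: with VM_NORESERVE clear, every page should have a reserve-map entry, so a nonzero chg (pages the map does not cover) can only mean a hole was punched and the reserve is gone. A one-function sketch with a hypothetical name:

#include <stdbool.h>

/* chg counts pages the reserve map does not cover; after a hole punch
 * via fallocate it is nonzero, and there is no reserve left to use. */
static bool sketch_has_reserves(long chg)
{
	return chg == 0;
}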
@@ -1519,7 +1519,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
 		 * For gigantic hugepages allocated through bootmem at
 		 * boot, it's safer to be consistent with the not-gigantic
 		 * hugepages and clear the PG_reserved bit from all tail pages
-		 * too. Otherwse drivers using get_user_pages() to access tail
+		 * too. Otherwise drivers using get_user_pages() to access tail
 		 * pages may get the reference counting wrong if they see
 		 * PG_reserved set on a tail page (despite the head page not
 		 * having PG_reserved set). Enforcing this consistency between
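The consistency requirement described here boils down to a loop over the tail pages of the compound page, clearing the reserved flag on each so get_user_pages() callers see head and tail treated alike. A simplified user-space model of that loop (fake_page is illustrative; the kernel walks struct page and clears PG_reserved):

#include <stdbool.h>
#include <stddef.h>

struct fake_page { bool reserved; };

/* Clear the reserved flag on every tail page (indices 1..2^order - 1)
 * of a compound page so reference counting treats head and tail pages
 * consistently. */
static void clear_tail_reserved(struct fake_page *head, unsigned int order)
{
	for (size_t i = 1; i < (1UL << order); i++)
		head[i].reserved = false;
}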
@@ -4579,9 +4579,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	/*
 	 * entry could be a migration/hwpoison entry at this point, so this
 	 * check prevents the kernel from going below assuming that we have
-	 * a active hugepage in pagecache. This goto expects the 2nd page fault,
-	 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
-	 * handle it.
+	 * an active hugepage in pagecache. This goto expects the 2nd page
+	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
+	 * properly handle it.
 	 */
 	if (!pte_present(entry))
 		goto out_mutex;
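What the comment leans on: if the entry is not a present PTE here, hugetlb_fault() deliberately does nothing and returns, so the access faults a second time and the early is_hugetlb_entry_(migration|hwpoisoned) checks handle it. A schematic sketch of that bail-and-refault shape (the enum and function are hypothetical, not kernel code):

enum entry_kind { ENTRY_PRESENT, ENTRY_MIGRATION, ENTRY_HWPOISON };

/* Returning without touching a non-present entry lets the access fault
 * again; the second fault's early migration/hwpoison checks then do
 * the real handling. */
static int fault_sketch(enum entry_kind entry)
{
	if (entry != ENTRY_PRESENT)
		return 0;	/* expect and rely on the 2nd page fault */

	/* ... normal handling of an active hugepage in pagecache ... */
	return 0;
}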