mm/page_isolation: simplify return value of start_isolate_page_range()
Callers no longer need the number of isolated pageblocks. Let's simplify.

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Charan Teja Reddy <charante@codeaurora.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michel Lespinasse <walken@google.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/20200819175957.28465-7-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 3fa0c7c79d
parent ea15153c3d
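Before the diff, a minimal, hypothetical caller sketch showing the contract after this change (the function demo_offline_range() and its error handling are made up for illustration; only the start_isolate_page_range() call and flags come from the patch context). The function now returns 0 on success or -EBUSY on failure, so a plain non-zero check is enough:

/*
 * Hypothetical caller sketch, assuming the post-patch contract:
 * start_isolate_page_range() returns 0 on success or -EBUSY if any
 * part of the range cannot be isolated.
 */
static int demo_offline_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE);
	if (ret)	/* non-zero now simply means failure; no "< 0" check needed */
		return ret;

	/* ... offline the range, then undo the isolation when done ... */
	return 0;
}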
mm/memory_hotplug.c
@@ -1514,7 +1514,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE,
 				       MEMORY_OFFLINE | REPORT_FAILURE);
-	if (ret < 0) {
+	if (ret) {
 		reason = "failure to isolate range";
 		goto failed_removal;
 	}

mm/page_alloc.c
@@ -8468,7 +8468,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype, 0);
-	if (ret < 0)
+	if (ret)
 		return ret;
 
 	/*

mm/page_isolation.c
@@ -173,8 +173,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * (e.g. __offline_pages will need to call it after check for isolated range for
  * a next retry).
  *
- * Return: the number of isolated pageblocks on success and -EBUSY if any part
- * of range cannot be isolated.
+ * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			     unsigned migratetype, int flags)
@@ -182,7 +181,6 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long pfn;
 	unsigned long undo_pfn;
 	struct page *page;
-	int nr_isolate_pageblock = 0;
 
 	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
 	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,10 +194,9 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 				undo_pfn = pfn;
 				goto undo;
 			}
-			nr_isolate_pageblock++;
 		}
 	}
-	return nr_isolate_pageblock;
+	return 0;
 undo:
 	for (pfn = start_pfn;
 	     pfn < undo_pfn;
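For readability, here is a sketch of how start_isolate_page_range() reads once the patch is applied, assembled from the hunks above. The calls to __first_valid_page(), set_migratetype_isolate(), pfn_to_online_page() and unset_migratetype_isolate() are paraphrased from the surrounding, unchanged code of mm/page_isolation.c, so treat this as a reading aid rather than a verbatim copy of the upstream file:

int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			/* Isolate this pageblock; on failure, roll back. */
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			/* No nr_isolate_pageblock++ bookkeeping anymore. */
		}
	}
	/* Success is now simply 0; the pageblock count is not reported. */
	return 0;
undo:
	/* Undo the isolation of every pageblock isolated so far. */
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}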