powerpc/perf: fix imc allocation failure handling
The alloc_pages_node() return value should be tested for failure before
being passed to page_address().

Tested-by: Anju T Sudhakar <anju@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190724084638.24982-3-npiggin@gmail.com
This commit is contained in:
parent
31f210cf42
commit
10c4bd7cd2
@@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size)
 {
 	int nid, rc = 0, core_id = (cpu / threads_per_core);
 	struct imc_mem_info *mem_info;
+	struct page *page;
 
 	/*
 	 * alloc_pages_node() will allocate memory for core in the
@@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size)
 	mem_info->id = core_id;
 
 	/* We need only vbase for core counters */
-	mem_info->vbase = page_address(alloc_pages_node(nid,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-	if (!mem_info->vbase)
+	page = alloc_pages_node(nid,
+			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+			__GFP_NOWARN, get_order(size));
+	if (!page)
 		return -ENOMEM;
+	mem_info->vbase = page_address(page);
 
 	/* Init the mutex */
 	core_imc_refc[core_id].id = core_id;
@@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	int nid = cpu_to_node(cpu_id);
 
 	if (!local_mem) {
+		struct page *page;
 		/*
 		 * This case could happen only once at start, since we dont
 		 * free the memory in cpu offline path.
 		 */
-		local_mem = page_address(alloc_pages_node(nid,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+		page = alloc_pages_node(nid,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 
 		per_cpu(thread_imc_mem, cpu_id) = local_mem;
 	}
@@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
 	int core_id = (cpu_id / threads_per_core);
 
 	if (!local_mem) {
-		local_mem = page_address(alloc_pages_node(phys_id,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+		struct page *page;
+
+		page = alloc_pages_node(phys_id,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 		per_cpu(trace_imc_mem, cpu_id) = local_mem;
 
 		/* Initialise the counters for trace mode */
|
Loading…
x
Reference in New Issue
Block a user