kaiser: fix perf crashes
Avoid perf crashes: place debug_store in the user-mapped per-cpu area instead of allocating, and use page allocator plus kaiser_add_mapping() to keep the BTS and PEBS buffers user-mapped (that is, present in the user mapping, though visible only to kernel and hardware). The PEBS fixup buffer does not need this treatment.

The need for a user-mapped struct debug_store showed up before doing any conscious perf testing: in a couple of kernel paging oopses on Westmere, implicating the debug_store offset of the per-cpu area.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
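In short, the diff below swaps kzalloc_node() for a page-allocator wrapper so each DS buffer can be mirrored into the KAISER shadow (user) page tables at page granularity, and every free must first drop that shadow mapping. A minimal usage sketch of the pairing the patch introduces; dsalloc()/dsfree() are the helpers added in the diff, while the surrounding error handling here is illustrative only:

	/* Illustrative only: allocate a DS buffer that stays user-mapped. */
	void *buffer = dsalloc(x86_pmu.pebs_buffer_size, GFP_KERNEL, cpu_to_node(cpu));

	if (unlikely(!buffer))
		return -ENOMEM;	/* on kaiser_add_mapping() failure, dsalloc() already freed the pages */

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;

	/* ... buffer is written by the PMU hardware while events run ... */

	/* Teardown: kaiser_remove_mapping() runs before free_pages() inside dsfree(). */
	dsfree((void *)(unsigned long)ds->pebs_buffer_base, x86_pmu.pebs_buffer_size);
	ds->pebs_buffer_base = 0;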
parent
487f0b73d8
commit
20cbe9a3aa
@@ -2,11 +2,15 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 
+#include <asm/kaiser.h>
 #include <asm/perf_event.h>
 #include <asm/insn.h>
 
 #include "perf_event.h"
 
+static
+DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE 24
 
@@ -268,6 +272,39 @@ void fini_debug_store_on_cpu(int cpu)
 
 static DEFINE_PER_CPU(void *, insn_buffer);
 
+static void *dsalloc(size_t size, gfp_t flags, int node)
+{
+#ifdef CONFIG_KAISER
+	unsigned int order = get_order(size);
+	struct page *page;
+	unsigned long addr;
+
+	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
+	if (!page)
+		return NULL;
+	addr = (unsigned long)page_address(page);
+	if (kaiser_add_mapping(addr, size, __PAGE_KERNEL) < 0) {
+		__free_pages(page, order);
+		addr = 0;
+	}
+	return (void *)addr;
+#else
+	return kmalloc_node(size, flags | __GFP_ZERO, node);
+#endif
+}
+
+static void dsfree(const void *buffer, size_t size)
+{
+#ifdef CONFIG_KAISER
+	if (!buffer)
+		return;
+	kaiser_remove_mapping((unsigned long)buffer, size);
+	free_pages((unsigned long)buffer, get_order(size));
+#else
+	kfree(buffer);
+#endif
+}
+
 static int alloc_pebs_buffer(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -278,7 +315,7 @@ static int alloc_pebs_buffer(int cpu)
 	if (!x86_pmu.pebs)
 		return 0;
 
-	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+	buffer = dsalloc(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
 	if (unlikely(!buffer))
 		return -ENOMEM;
 
@@ -289,7 +326,7 @@ static int alloc_pebs_buffer(int cpu)
 	if (x86_pmu.intel_cap.pebs_format < 2) {
 		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
 		if (!ibuffer) {
-			kfree(buffer);
+			dsfree(buffer, x86_pmu.pebs_buffer_size);
 			return -ENOMEM;
 		}
 		per_cpu(insn_buffer, cpu) = ibuffer;
@@ -315,7 +352,8 @@ static void release_pebs_buffer(int cpu)
 	kfree(per_cpu(insn_buffer, cpu));
 	per_cpu(insn_buffer, cpu) = NULL;
 
-	kfree((void *)(unsigned long)ds->pebs_buffer_base);
+	dsfree((void *)(unsigned long)ds->pebs_buffer_base,
+			x86_pmu.pebs_buffer_size);
 	ds->pebs_buffer_base = 0;
 }
 
@@ -329,7 +367,7 @@ static int alloc_bts_buffer(int cpu)
 	if (!x86_pmu.bts)
 		return 0;
 
-	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+	buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
 	if (unlikely(!buffer)) {
 		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
 		return -ENOMEM;
@@ -355,19 +393,15 @@ static void release_bts_buffer(int cpu)
 	if (!ds || !x86_pmu.bts)
 		return;
 
-	kfree((void *)(unsigned long)ds->bts_buffer_base);
+	dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE);
 	ds->bts_buffer_base = 0;
 }
 
 static int alloc_ds_buffer(int cpu)
 {
-	int node = cpu_to_node(cpu);
-	struct debug_store *ds;
-
-	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
-	if (unlikely(!ds))
-		return -ENOMEM;
+	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);
 
+	memset(ds, 0, sizeof(*ds));
 	per_cpu(cpu_hw_events, cpu).ds = ds;
 
 	return 0;
@@ -381,7 +415,6 @@ static void release_ds_buffer(int cpu)
 		return;
 
 	per_cpu(cpu_hw_events, cpu).ds = NULL;
-	kfree(ds);
 }
 
 void release_ds_buffers(void)