kmmio.c handles the list of mmio probes with callbacks, the list of traced
pages, and attaching into the page fault handler and die notifier. It arms,
traps and disarms the given pages; this is the core of mmiotrace.

mmio-mod.c is a user interface, hooking into the ioremap functions and
registering the mmio probes. It also decodes the required information from
trapped mmio accesses via the pre and post callbacks in each probe.
Currently, hooking into the ioremap functions works by redefining the
symbols of the target (binary) kernel module, so that it calls the traced
versions of the functions.

The most notable changes since the last discussion are:

- kmmio.c is a built-in, not part of the module
- direct call from fault.c to kmmio.c, removing all dynamic hooks
- prepare for unregistering probes at any time
- make kmmio re-initializable and accessible to more than one user
- rewrite kmmio locking to remove all spinlocks from the page fault path

Can I abuse call_rcu() like I do in kmmio.c:unregister_kmmio_probe(), or is
there a better way? The function called via call_rcu() itself calls
call_rcu() again; will this work or break? There I need a second RCU grace
period after the first grace period for the page faults.

Mmiotrace itself (mmio-mod.c) is still a module; I am going to attack that
next. At some point I will start looking into how to make mmiotrace a tracer
component of ftrace (thanks for the hint, Ingo). Ftrace should make the user
space part of mmiotracing as simple as 'cat /debug/trace/mmio > dump.txt'.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
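To make the probe/callback flow described above concrete, here is a minimal,
hypothetical sketch of a kmmio client. The header path, the kmmio_probe field
names (addr, len, pre_handler, post_handler), the register/unregister calls
and the IO_PHYS_ADDR/IO_LEN placeholders are assumptions for illustration
only; the interface actually provided by the patch set may differ.

/*
 * Hypothetical kmmio client. Names below are assumptions modelled on the
 * description in the changelog; see the real kmmio header in the patch.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>	/* assumed home of struct kmmio_probe */

#define IO_PHYS_ADDR	0xfd000000UL	/* placeholder physical address */
#define IO_LEN		0x1000UL	/* placeholder region size */

static void __iomem *io_base;

/* pre callback: runs from the page fault handler before the access */
static void sketch_pre(struct kmmio_probe *p, struct pt_regs *regs,
		       unsigned long addr)
{
	printk(KERN_INFO "mmio access hit at 0x%lx\n", addr);
}

/* post callback: runs from the debug trap after the access has completed */
static void sketch_post(struct kmmio_probe *p, unsigned long condition,
			struct pt_regs *regs)
{
	printk(KERN_INFO "mmio access completed\n");
}

static struct kmmio_probe sketch_probe = {
	.pre_handler	= sketch_pre,
	.post_handler	= sketch_post,
};

static int __init sketch_init(void)
{
	io_base = ioremap(IO_PHYS_ADDR, IO_LEN);
	if (!io_base)
		return -ENOMEM;
	sketch_probe.addr = (unsigned long)io_base;
	sketch_probe.len = IO_LEN;
	return register_kmmio_probe(&sketch_probe);
}

static void __exit sketch_exit(void)
{
	unregister_kmmio_probe(&sketch_probe);
	iounmap(io_base);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

A real client such as mmio-mod.c would of course take addr/len from the
hooked ioremap calls rather than from a fixed placeholder address.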
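On the call_rcu() question: calling call_rcu() again from inside an RCU
callback is allowed, since the rcu_head is off RCU's internal lists once its
callback has been invoked and may be reused, including being re-queued from
within that callback. Below is a generic sketch of the two-stage pattern the
changelog describes; the names and structure are illustrative, not the actual
kmmio.c code.

/*
 * Illustrative chained call_rcu() pattern: the first callback runs after
 * the grace period that guarantees no page-fault-path reader still sees
 * the probe, then queues a second callback that frees the memory after
 * one more grace period.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct release_ctx {
	struct rcu_head rcu;
	/* ...pages and probe state to tear down... */
};

/* stage 2: after the second grace period, the memory can go away */
static void release_stage2(struct rcu_head *head)
{
	struct release_ctx *ctx = container_of(head, struct release_ctx, rcu);

	kfree(ctx);
}

/* stage 1: after the first grace period, release the traced pages, then
 * chain a second grace period before freeing; reusing the rcu_head here
 * is fine because its callback has already been invoked */
static void release_stage1(struct rcu_head *head)
{
	struct release_ctx *ctx = container_of(head, struct release_ctx, rcu);

	/* disarm/release the fault pages here... */
	call_rcu(&ctx->rcu, release_stage2);
}

/* unregister path: after unlinking from the RCU-protected probe list */
static void start_release(struct release_ctx *ctx)
{
	call_rcu(&ctx->rcu, release_stage1);
}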
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

static struct fs_struct init_fs = INIT_FS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_UNUSED_SYMBOL(init_mm); /* will be removed in 2.6.26 */

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data.cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;