#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/segment.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int xds;
	int xes;
	int xfs;
	/* int gs; */
	long orig_eax;
	long eip;
	int xcs;
	long eflags;
	long esp;
	int xss;
};
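
/*
 * Illustrative sketch only (not part of this header): a user-space tracer
 * can fetch this register layout with ptrace(PTRACE_GETREGS).  The helper
 * name read_child_eip() and the error handling are assumptions for the
 * example; struct user_regs_struct in <sys/user.h> mirrors this layout.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static long read_child_eip(pid_t child)
{
	struct user_regs_struct regs;

	if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == -1)
		return -1;
	return regs.eip;	/* instruction pointer at the ptrace stop */
}
#endif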

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	/* int gs; */
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};
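
/*
 * Illustrative sketch only: with the x86_64 calling convention, system
 * call arguments arrive in rdi, rsi, rdx, r10, r8 and r9, so kernel code
 * can read them back from this layout.  The helper name
 * example_syscall_arg0() is an assumption for the example.
 */
#if 0
static inline unsigned long example_syscall_arg0(struct pt_regs *regs)
{
	return regs->di;	/* first system call argument on x86_64 */
}
#endif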

#endif /* __KERNEL__ */
#endif /* !__i386__ */

#ifdef __KERNEL__

#include <linux/init.h>

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);

extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}
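
/*
 * Illustrative sketch only: exit-side tracing code in the style of
 * syscall_trace_leave() could report the value a system call is about to
 * return via regs_return_value().  The helper name and message text are
 * assumptions for the example.
 */
#if 0
static void example_report_retval(struct pt_regs *regs)
{
	printk(KERN_DEBUG "syscall returning %ld\n",
	       (long)regs_return_value(regs));
}
#endif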

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
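
/*
 * Worked example of the X86_32 test above (constants from asm/segment.h
 * and asm/processor-flags.h, assuming SEGMENT_RPL_MASK == 0x3,
 * USER_RPL == 0x3 and X86_VM_MASK == 0x00020000): protected-mode user
 * code has (cs & 0x3) == 0x3, so the OR is >= USER_RPL; V8086 mode sets
 * bit 17 of flags, which alone makes the OR >= USER_RPL; kernel-mode
 * register sets yield 0, 1 or 2 and fail the comparison.
 */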

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  So regs will be the current sp.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)regs;
#else
	return regs->sp;
#endif
}

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
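
/*
 * Illustrative sketch only: arch-independent callers are expected to use
 * these accessors rather than poke at pt_regs fields directly, e.g. when
 * logging a fault location.  The helper name and message text are
 * assumptions for the example.
 */
#if 0
static void example_report_location(struct pt_regs *regs)
{
	printk(KERN_ERR "fault at ip %p, user sp %p\n",
	       (void *)instruction_pointer(regs),
	       (void *)user_stack_pointer(regs));
}
#endif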

/*
 * These are defined as per linux/ptrace.h, which see.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
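
/*
 * Illustrative sketch only: generic ptrace code gates block stepping on
 * arch_has_block_step() before asking the arch to enable it.  The helper
 * name example_enable_block_step() is an assumption for the example.
 */
#if 0
static int example_enable_block_step(struct task_struct *child)
{
	if (!arch_has_block_step())
		return -EIO;
	user_enable_block_step(child);
	return 0;
}
#endif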

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */