#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>
#include <asm/i387.h>

#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2
#define XSTATE_YMM	0x4

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

#define FXSAVE_SIZE	512
/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif
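/*
 * The XSAVE/XRSTOR opcodes below are emitted as raw .byte sequences so the
 * file assembles even with toolchains that don't know these instructions.
 * On 64-bit kernels the REX.W prefix (0x48) selects their 64-bit forms.
 */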
extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern struct xsave_struct *init_xstate_buf;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void xsave_cntxt_init(void);
extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
			    void __user *fpstate,
			    struct _fpx_sw_bytes *sw);
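/*
 * Restore the extended state in the kernel buffer @fx.  The mask in
 * edx:eax is all ones, requesting every state component; a fault during
 * XRSTOR is caught via the exception table and reported as -1 in err.
 */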
static inline int xrstor_checking(struct xsave_struct *fx)
{
	int err;

	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}
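/*
 * Save the current extended state to the user-space buffer @buf.  If the
 * save faults part-way, the buffer is cleared so user space never sees a
 * partially written frame.
 */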
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}
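/*
 * Restore extended state from the user-space buffer @buf.  Only the state
 * components selected by @mask, split into edx:eax for XRSTOR, are
 * restored.
 */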
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}
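/*
 * Like xrestore_user() but for a kernel buffer and without fault handling:
 * the caller must guarantee that @fx is a valid xsave area.
 */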
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");
}
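/*
 * Save the full extended state (mask edx:eax = -1) of @tsk into its own
 * xsave area.
 */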
static inline void xsave(struct task_struct *tsk)
{
	/*
	 * The hand-assembled opcode hard-codes (%edi)/(%rdi) as the memory
	 * operand, so force the compiler, via the "D" constraint, to pick an
	 * addressing mode that doesn't require extended registers.
	 */
	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
			     : : "D" (&(tsk->thread.xstate->xsave)),
			       "a" (-1), "d" (-1) : "memory");
}
#endif