#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID		0x0000000d

#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2
#define XSTATE_YMM	0x4

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
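
/*
 * Layout of the xsave area: the legacy FXSAVE image occupies the
 * first 512 bytes, the 64-byte xsave header starts at offset 512,
 * and the extended YMM state follows at offset 576.
 */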

/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
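
/*
 * The xsave/xrstor instructions below are emitted as raw opcode bytes;
 * on 64-bit a REX.W prefix (0x48) selects the 64-bit form.
 */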
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
			    void __user *fpstate,
			    struct _fpx_sw_bytes *sw);
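
/*
 * Restore a task's extended state with xrstor.  edx:eax form the
 * 64-bit instruction mask; -1:-1 requests all state components.
 * A fault during the restore is fixed up to return -1 in err.
 */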
static inline int fpu_xrstor_checking(struct fpu *fpu)
{
	struct xsave_struct *fx = &fpu->state->xsave;
	int err;

	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}
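
/*
 * Save the current extended state to a user-space buffer (e.g. a
 * signal frame).  On a fault the fixup returns -1 and the partially
 * written buffer is cleared.
 */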
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->xsave_hdr,
			   sizeof(struct xsave_hdr_struct));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}
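
/*
 * Restore extended state from a user-space buffer.  Only the state
 * components selected by @mask (split into edx:eax) are restored.
 */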
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}
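
/*
 * Kernel-side xrstor: restore the state components selected by @mask
 * from an in-kernel xsave area.  No fault fixup is needed here.
 */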
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");
}
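
/*
 * Kernel-side xsave: save the state components selected by @mask
 * into an in-kernel xsave area.
 */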
static inline void xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");
}
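
/*
 * Save a task's extended state, using xsaveopt (0x0f,0xae,0x37) in
 * place of xsave when the CPU supports it: xsaveopt skips writing
 * state components left unmodified since the last xrstor.
 */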
static inline void fpu_xsave(struct fpu *fpu)
{
	/*
	 * The hand-assembled opcodes hard-code the (%rdi)/(%edi)
	 * addressing mode, so force the compiler to pass the xsave
	 * area in that register via the "D" constraint instead of
	 * letting it pick an addressing mode of its own.
	 */
	alternative_input(
		".byte " REX_PREFIX "0x0f,0xae,0x27",
		".byte " REX_PREFIX "0x0f,0xae,0x37",
		X86_FEATURE_XSAVEOPT,
		[fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
		"memory");
}

#endif