/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */
#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>
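
/*
 * init_new_context() initializes the per-mm context: the attach counter
 * and the deferred-flush flag start out at zero, and asce_bits is built
 * from the table length and the user bits. On 64-bit the initial
 * address space uses a region-third (three-level) table, hence the
 * _ASCE_TYPE_REGION3 type. Whether the new context gets extended page
 * tables (pgste) is decided here as well, see the comment below.
 */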
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
        if (current->mm && current->mm->context.alloc_pgste) {
                /*
                 * alloc_pgste indicates that any NEW context will be created
                 * with extended page tables. The old context is unchanged. The
                 * page table allocation and the page table operations will
                 * look at has_pgste to distinguish normal and extended page
                 * tables. The only way to create extended page tables is to
                 * set alloc_pgste and then create a new context (e.g. dup_mm).
                 * The page table allocation is called after init_new_context
                 * and if has_pgste is set, it will create extended page
                 * tables.
                 */
                mm->context.has_pgste = 1;
                mm->context.alloc_pgste = 1;
        } else {
                mm->context.has_pgste = 0;
                mm->context.alloc_pgste = 0;
        }
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}

#define destroy_context(mm)             do { } while (0)
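
/*
 * Pick the control-register load instruction for the inline assembly
 * below: "lctl" loads the 32-bit control registers on 31-bit kernels,
 * "lctlg" loads the full 64-bit control registers on 64-bit kernels.
 */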
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
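
/*
 * Make the given mm the current user address space: publish its ASCE
 * (address space control element) in the lowcore and load it into
 * control register 1 (primary space) or control register 13 (home
 * space), depending on the address space mode the kernel is using.
 */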
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
        pgd_t *pgd = mm->pgd;

        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
        if (user_mode != HOME_SPACE_MODE) {
                /* Load primary space page table origin. */
                asm volatile(LCTL_OPCODE" 1,1,%0\n"
                             : : "m" (S390_lowcore.user_asce));
        } else
                /* Load home space page table origin. */
                asm volatile(LCTL_OPCODE" 13,13,%0"
                             : : "m" (S390_lowcore.user_asce));
        set_fs(current->thread.mm_segment);
}
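
/*
 * switch_mm() attaches this CPU to the next mm and detaches it from the
 * previous one. attach_count tracks how many attachments the mm has;
 * a TLB flush that could not be performed earlier (flush_mm set) is
 * carried out here before user space runs with the next mm.
 */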
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        atomic_inc(&next->context.attach_count);
        /* Check for TLBs not flushed yet */
        if (next->context.flush_mm)
                __tlb_flush_mm(next);
}
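
/*
 * No lazy-TLB or deactivate work is needed on s390; all of the
 * bookkeeping happens in switch_mm() above.
 */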
#define enter_lazy_tlb(mm,tsk)          do { } while (0)
#define deactivate_mm(tsk,mm)           do { } while (0)
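
/*
 * activate_mm() installs a new address space for the current task
 * (e.g. at exec time); it simply switches to the new mm.
 */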
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
}

#endif /* __S390_MMU_CONTEXT_H */