/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		/* Pin the new mm so it cannot be freed while we run on it. */
		atomic_inc(&mm->mm_count);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/* Drop the lazy-TLB reference we held on the previous active_mm. */
	if (active_mm != mm)
		mmdrop(active_mm);
}
EXPORT_SYMBOL_GPL(use_mm);
/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	/* Fold this task's cached RSS counters back into the mm. */
	sync_mm_rss(mm);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(unuse_mm);
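
/*
 * A minimal usage sketch, assuming a kernel thread that wants to write a
 * result into the address space of a submitting user process. This is an
 * illustration only and is not compiled into this file; example_work,
 * example_worker and their fields are hypothetical names. A real caller
 * would take a reference on the mm (e.g. via get_task_mm()) before handing
 * it to the thread, as historical users of use_mm() such as the AIO code
 * did.
 */
#if 0
#include <linux/printk.h>
#include <linux/uaccess.h>

struct example_work {
	struct mm_struct *mm;		/* referenced by the submitter */
	int __user *user_buf;		/* destination in the submitter's space */
};

static int example_worker(void *data)
{
	struct example_work *work = data;
	int result = 42;

	use_mm(work->mm);		/* adopt the user address space */
	if (copy_to_user(work->user_buf, &result, sizeof(result)))
		pr_warn("example_worker: copy_to_user failed\n");
	unuse_mm(work->mm);		/* detach; mm stays active in lazy-TLB mode */

	mmput(work->mm);		/* drop the submitter's reference */
	return 0;
}
#endif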