/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>
/*
* use_mm
* Makes the calling kernel thread take on the specified
* mm context .
* Called by the retry thread execute retries within the
* iocb issuer ' s mm context , so that copy_from / to_user
* operations work seamlessly for aio .
* ( Note : this routine is intended to be called only
* from a kernel thread context )
*/
void use_mm ( struct mm_struct * mm )
{
struct mm_struct * active_mm ;
struct task_struct * tsk = current ;
task_lock ( tsk ) ;
active_mm = tsk - > active_mm ;
2009-09-22 04:03:52 +04:00
if ( active_mm ! = mm ) {
atomic_inc ( & mm - > mm_count ) ;
tsk - > active_mm = mm ;
}
2009-09-22 04:03:51 +04:00
tsk - > mm = mm ;
switch_mm ( active_mm , mm , tsk ) ;
task_unlock ( tsk ) ;
2009-09-22 04:03:52 +04:00
if ( active_mm ! = mm )
mmdrop ( active_mm ) ;
2009-09-22 04:03:51 +04:00
}
2010-01-14 09:17:18 +03:00
EXPORT_SYMBOL_GPL ( use_mm ) ;
2009-09-22 04:03:51 +04:00
/*
* unuse_mm
* Reverses the effect of use_mm , i . e . releases the
* specified mm context which was earlier taken on
* by the calling kernel thread
* ( Note : this routine is intended to be called only
* from a kernel thread context )
*/
void unuse_mm ( struct mm_struct * mm )
{
struct task_struct * tsk = current ;
task_lock ( tsk ) ;
2010-03-23 23:35:37 +03:00
sync_mm_rss ( tsk , mm ) ;
2009-09-22 04:03:51 +04:00
tsk - > mm = NULL ;
/* active_mm is still 'mm' */
enter_lazy_tlb ( mm , tsk ) ;
task_unlock ( tsk ) ;
}
2010-01-14 09:17:18 +03:00
EXPORT_SYMBOL_GPL ( unuse_mm ) ;