/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (ie setuid() and friends).
 */
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
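
/*
 * Illustrative worked example (an assumption, not from this file:
 * UIDHASH_BITS and UIDHASH_SZ are defined elsewhere; assume
 * UIDHASH_BITS == 8, so UIDHASH_SZ == 256 and UIDHASH_MASK == 255).
 * Then uid 1000 hashes to:
 *
 *	((1000 >> 8) + 1000) & 255 == (3 + 1000) & 255 == 235
 *
 * so find_user(1000) and alloc_uid(ns, 1000) both walk hash bucket 235.
 */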

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed.  Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
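
/*
 * A minimal sketch of the resulting caller-side rule (illustrative, not
 * part of the original file): paths that may already have interrupts
 * disabled must use the irqsave variants, never the _bh variants:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	...hash lookup or insertion...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * spin_lock_bh()/spin_unlock_bh() would be wrong here, since
 * local_bh_enable() with interrupts disabled can run softirq callbacks
 * that re-enable interrupts behind the caller's back, as described above.
 */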

static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct user_struct *user;

	list_for_each_entry(user, hashent, uidhash_list) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
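
/*
 * Usage sketch (illustrative only): the reference taken by find_user()
 * must be given back with free_uid() once the caller is done:
 *
 *	struct user_struct *user = find_user(uid);
 *
 *	if (user) {
 *		int nproc = atomic_read(&user->processes);
 *		...use nproc...
 *		free_uid(user);
 *	}
 */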

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}
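
/*
 * Explanatory sketch of atomic_dec_and_lock() as used above (an
 * approximation, not the kernel's actual implementation):
 *
 *	if (atomic_add_unless(cnt, -1, 1))	// fast path: count stays > 0
 *		return 0;			// lock was not taken
 *	spin_lock(lock);			// count may reach zero: lock first
 *	if (atomic_dec_and_test(cnt))
 *		return 1;			// zero reached, lock still held
 *	spin_unlock(lock);
 *	return 0;
 *
 * Because uidhash_lock is taken before the final decrement, a concurrent
 * uid_hash_find() can never take a reference to a user_struct whose
 * count has already dropped to zero.
 */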

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct list_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}
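
/*
 * Usage sketch (illustrative, not from this file): a setuid()-style
 * caller allocates the target user's structure up front, then hands the
 * reference over to current->user via switch_uid():
 *
 *	struct user_struct *new_user;
 *
 *	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	switch_uid(new_user);
 */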

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
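
/*
 * Illustrative sketch of the RLIMIT_NPROC check suggested in the DaveM
 * comment above (an assumption, not implemented in this file): a caller
 * could reject the switch when the target user is over its limit:
 *
 *	if (atomic_read(&new_user->processes) >=
 *	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
 *	    new_user != &root_user)
 *		return -EAGAIN;
 */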

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);