2008-02-08 15:18:22 +03:00
/*
* linux / ipc / namespace . c
* Copyright ( C ) 2006 Pavel Emelyanov < xemul @ openvz . org > OpenVZ , SWsoft Inc .
*/
# include <linux/ipc.h>
# include <linux/msg.h>
# include <linux/ipc_namespace.h>
# include <linux/rcupdate.h>
# include <linux/nsproxy.h>
# include <linux/slab.h>
2017-02-02 19:54:15 +03:00
# include <linux/cred.h>
namespaces: ipc namespaces: implement support for posix msqueues
Implement multiple mounts of the mqueue file system, and link it to usage
of CLONE_NEWIPC.
Each ipc ns has a corresponding mqueuefs superblock. When a user does
clone(CLONE_NEWIPC) or unshare(CLONE_NEWIPC), the unshare will cause an
internal mount of a new mqueuefs sb linked to the new ipc ns.
When a user does 'mount -t mqueue mqueue /dev/mqueue', he mounts the
mqueuefs superblock.
Posix message queues can be worked with both through the mq_* system calls
(see mq_overview(7)), and through the VFS through the mqueue mount. Any
usage of mq_open() and friends will work with the acting task's ipc
namespace. Any actions through the VFS will work with the mqueuefs in
which the file was created. So if a user doesn't remount mqueuefs after
unshare(CLONE_NEWIPC), mq_open("/ab") will not be reflected in "ls
/dev/mqueue".
If task a mounts mqueue for ipc_ns:1, then clones task b with a new ipcns,
ipcns:2, and then task a is the last task in ipc_ns:1 to exit, then (1)
ipc_ns:1 will be freed, (2) it's superblock will live on until task b
umounts the corresponding mqueuefs, and vfs actions will continue to
succeed, but (3) sb->s_fs_info will be NULL for the sb corresponding to
the deceased ipc_ns:1.
To make this happen, we must protect the ipc reference count when
a) a task exits and drops its ipcns->count, since it might be dropping
it to 0 and freeing the ipcns
b) a task accesses the ipcns through its mqueuefs interface, since it
bumps the ipcns refcount and might race with the last task in the ipcns
exiting.
So the kref is changed to an atomic_t so we can use
atomic_dec_and_lock(&ns->count,mq_lock), and every access to the ipcns
through ns = mqueuefs_sb->s_fs_info is protected by the same lock.
Signed-off-by: Cedric Le Goater <clg@fr.ibm.com>
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-07 06:01:10 +04:00
# include <linux/fs.h>
# include <linux/mount.h>
2011-03-24 02:43:23 +03:00
# include <linux/user_namespace.h>
2013-04-12 04:50:06 +04:00
# include <linux/proc_ns.h>
2017-02-06 12:57:33 +03:00
# include <linux/sched/task.h>
2008-02-08 15:18:22 +03:00
# include "util.h"
2016-08-08 22:20:23 +03:00
/*
 * Charge one IPC namespace against the per-user ucount limit
 * (UCOUNT_IPC_NAMESPACES) for the current euid in @ns.  Returns the
 * ucounts to uncharge later, or NULL if the limit would be exceeded.
 */
static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}
/* Uncharge one IPC namespace previously charged by inc_ipc_namespaces(). */
static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}
2012-07-26 15:02:49 +04:00
static struct ipc_namespace * create_ipc_ns ( struct user_namespace * user_ns ,
2011-03-24 02:43:24 +03:00
struct ipc_namespace * old_ns )
2008-02-08 15:18:22 +03:00
{
struct ipc_namespace * ns ;
2016-08-08 22:20:23 +03:00
struct ucounts * ucounts ;
namespaces: ipc namespaces: implement support for posix msqueues
Implement multiple mounts of the mqueue file system, and link it to usage
of CLONE_NEWIPC.
Each ipc ns has a corresponding mqueuefs superblock. When a user does
clone(CLONE_NEWIPC) or unshare(CLONE_NEWIPC), the unshare will cause an
internal mount of a new mqueuefs sb linked to the new ipc ns.
When a user does 'mount -t mqueue mqueue /dev/mqueue', he mounts the
mqueuefs superblock.
Posix message queues can be worked with both through the mq_* system calls
(see mq_overview(7)), and through the VFS through the mqueue mount. Any
usage of mq_open() and friends will work with the acting task's ipc
namespace. Any actions through the VFS will work with the mqueuefs in
which the file was created. So if a user doesn't remount mqueuefs after
unshare(CLONE_NEWIPC), mq_open("/ab") will not be reflected in "ls
/dev/mqueue".
If task a mounts mqueue for ipc_ns:1, then clones task b with a new ipcns,
ipcns:2, and then task a is the last task in ipc_ns:1 to exit, then (1)
ipc_ns:1 will be freed, (2) it's superblock will live on until task b
umounts the corresponding mqueuefs, and vfs actions will continue to
succeed, but (3) sb->s_fs_info will be NULL for the sb corresponding to
the deceased ipc_ns:1.
To make this happen, we must protect the ipc reference count when
a) a task exits and drops its ipcns->count, since it might be dropping
it to 0 and freeing the ipcns
b) a task accesses the ipcns through its mqueuefs interface, since it
bumps the ipcns refcount and might race with the last task in the ipcns
exiting.
So the kref is changed to an atomic_t so we can use
atomic_dec_and_lock(&ns->count,mq_lock), and every access to the ipcns
through ns = mqueuefs_sb->s_fs_info is protected by the same lock.
Signed-off-by: Cedric Le Goater <clg@fr.ibm.com>
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-07 06:01:10 +04:00
int err ;
2008-02-08 15:18:22 +03:00
2016-09-22 21:08:36 +03:00
err = - ENOSPC ;
2016-08-08 22:20:23 +03:00
ucounts = inc_ipc_namespaces ( user_ns ) ;
if ( ! ucounts )
goto fail ;
err = - ENOMEM ;
2008-02-08 15:18:22 +03:00
ns = kmalloc ( sizeof ( struct ipc_namespace ) , GFP_KERNEL ) ;
if ( ns = = NULL )
2016-08-08 22:20:23 +03:00
goto fail_dec ;
2008-02-08 15:18:22 +03:00
2014-11-01 07:45:45 +03:00
err = ns_alloc_inum ( & ns - > ns ) ;
2016-08-08 22:20:23 +03:00
if ( err )
goto fail_free ;
2014-11-01 09:32:53 +03:00
ns - > ns . ops = & ipcns_operations ;
2011-06-15 21:21:48 +04:00
namespaces: ipc namespaces: implement support for posix msqueues
Implement multiple mounts of the mqueue file system, and link it to usage
of CLONE_NEWIPC.
Each ipc ns has a corresponding mqueuefs superblock. When a user does
clone(CLONE_NEWIPC) or unshare(CLONE_NEWIPC), the unshare will cause an
internal mount of a new mqueuefs sb linked to the new ipc ns.
When a user does 'mount -t mqueue mqueue /dev/mqueue', he mounts the
mqueuefs superblock.
Posix message queues can be worked with both through the mq_* system calls
(see mq_overview(7)), and through the VFS through the mqueue mount. Any
usage of mq_open() and friends will work with the acting task's ipc
namespace. Any actions through the VFS will work with the mqueuefs in
which the file was created. So if a user doesn't remount mqueuefs after
unshare(CLONE_NEWIPC), mq_open("/ab") will not be reflected in "ls
/dev/mqueue".
If task a mounts mqueue for ipc_ns:1, then clones task b with a new ipcns,
ipcns:2, and then task a is the last task in ipc_ns:1 to exit, then (1)
ipc_ns:1 will be freed, (2) it's superblock will live on until task b
umounts the corresponding mqueuefs, and vfs actions will continue to
succeed, but (3) sb->s_fs_info will be NULL for the sb corresponding to
the deceased ipc_ns:1.
To make this happen, we must protect the ipc reference count when
a) a task exits and drops its ipcns->count, since it might be dropping
it to 0 and freeing the ipcns
b) a task accesses the ipcns through its mqueuefs interface, since it
bumps the ipcns refcount and might race with the last task in the ipcns
exiting.
So the kref is changed to an atomic_t so we can use
atomic_dec_and_lock(&ns->count,mq_lock), and every access to the ipcns
through ns = mqueuefs_sb->s_fs_info is protected by the same lock.
Signed-off-by: Cedric Le Goater <clg@fr.ibm.com>
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-07 06:01:10 +04:00
atomic_set ( & ns - > count , 1 ) ;
2016-05-31 20:26:41 +03:00
ns - > user_ns = get_user_ns ( user_ns ) ;
2016-08-08 22:20:23 +03:00
ns - > ucounts = ucounts ;
2016-05-31 20:26:41 +03:00
namespaces: ipc namespaces: implement support for posix msqueues
Implement multiple mounts of the mqueue file system, and link it to usage
of CLONE_NEWIPC.
Each ipc ns has a corresponding mqueuefs superblock. When a user does
clone(CLONE_NEWIPC) or unshare(CLONE_NEWIPC), the unshare will cause an
internal mount of a new mqueuefs sb linked to the new ipc ns.
When a user does 'mount -t mqueue mqueue /dev/mqueue', he mounts the
mqueuefs superblock.
Posix message queues can be worked with both through the mq_* system calls
(see mq_overview(7)), and through the VFS through the mqueue mount. Any
usage of mq_open() and friends will work with the acting task's ipc
namespace. Any actions through the VFS will work with the mqueuefs in
which the file was created. So if a user doesn't remount mqueuefs after
unshare(CLONE_NEWIPC), mq_open("/ab") will not be reflected in "ls
/dev/mqueue".
If task a mounts mqueue for ipc_ns:1, then clones task b with a new ipcns,
ipcns:2, and then task a is the last task in ipc_ns:1 to exit, then (1)
ipc_ns:1 will be freed, (2) it's superblock will live on until task b
umounts the corresponding mqueuefs, and vfs actions will continue to
succeed, but (3) sb->s_fs_info will be NULL for the sb corresponding to
the deceased ipc_ns:1.
To make this happen, we must protect the ipc reference count when
a) a task exits and drops its ipcns->count, since it might be dropping
it to 0 and freeing the ipcns
b) a task accesses the ipcns through its mqueuefs interface, since it
bumps the ipcns refcount and might race with the last task in the ipcns
exiting.
So the kref is changed to an atomic_t so we can use
atomic_dec_and_lock(&ns->count,mq_lock), and every access to the ipcns
through ns = mqueuefs_sb->s_fs_info is protected by the same lock.
Signed-off-by: Cedric Le Goater <clg@fr.ibm.com>
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-07 06:01:10 +04:00
err = mq_init_ns ( ns ) ;
2016-08-08 22:20:23 +03:00
if ( err )
goto fail_put ;
2008-04-29 12:00:40 +04:00
2008-02-08 15:18:57 +03:00
sem_init_ns ( ns ) ;
msg_init_ns ( ns ) ;
shm_init_ns ( ns ) ;
2008-02-08 15:18:22 +03:00
return ns ;
2016-08-08 22:20:23 +03:00
fail_put :
put_user_ns ( ns - > user_ns ) ;
ns_free_inum ( & ns - > ns ) ;
fail_free :
kfree ( ns ) ;
fail_dec :
dec_ipc_namespaces ( ucounts ) ;
fail :
return ERR_PTR ( err ) ;
2008-02-08 15:18:22 +03:00
}
2011-03-24 02:43:24 +03:00
struct ipc_namespace * copy_ipcs ( unsigned long flags ,
2012-07-26 15:02:49 +04:00
struct user_namespace * user_ns , struct ipc_namespace * ns )
2008-02-08 15:18:22 +03:00
{
if ( ! ( flags & CLONE_NEWIPC ) )
2009-06-18 03:27:54 +04:00
return get_ipc_ns ( ns ) ;
2012-07-26 15:02:49 +04:00
return create_ipc_ns ( user_ns , ns ) ;
2008-02-08 15:18:22 +03:00
}
2008-02-08 15:18:57 +03:00
/*
* free_ipcs - free all ipcs of one type
* @ ns : the namespace to remove the ipcs from
* @ ids : the table of ipcs to free
* @ free : the function called to free each individual ipc
*
* Called for each kind of ipc when an ipc_namespace exits .
*/
void free_ipcs ( struct ipc_namespace * ns , struct ipc_ids * ids ,
void ( * free ) ( struct ipc_namespace * , struct kern_ipc_perm * ) )
{
struct kern_ipc_perm * perm ;
int next_id ;
int total , in_use ;
2013-09-12 01:26:24 +04:00
down_write ( & ids - > rwsem ) ;
2008-02-08 15:18:57 +03:00
in_use = ids - > in_use ;
for ( total = 0 , next_id = 0 ; total < in_use ; next_id + + ) {
perm = idr_find ( & ids - > ipcs_idr , next_id ) ;
if ( perm = = NULL )
continue ;
2013-09-12 01:26:29 +04:00
rcu_read_lock ( ) ;
ipc_lock_object ( perm ) ;
2008-02-08 15:18:57 +03:00
free ( ns , perm ) ;
total + + ;
}
2013-09-12 01:26:24 +04:00
up_write ( & ids - > rwsem ) ;
2008-02-08 15:18:57 +03:00
}
2009-06-18 03:27:56 +04:00
static void free_ipc_ns ( struct ipc_namespace * ns )
{
sem_exit_ns ( ns ) ;
msg_exit_ns ( ns ) ;
shm_exit_ns ( ns ) ;
2016-08-08 22:20:23 +03:00
dec_ipc_namespaces ( ns - > ucounts ) ;
2011-03-24 02:43:23 +03:00
put_user_ns ( ns - > user_ns ) ;
2014-11-01 07:45:45 +03:00
ns_free_inum ( & ns - > ns ) ;
2011-03-25 11:57:01 +03:00
kfree ( ns ) ;
2009-06-18 03:27:56 +04:00
}
namespaces: ipc namespaces: implement support for posix msqueues
Implement multiple mounts of the mqueue file system, and link it to usage
of CLONE_NEWIPC.
Each ipc ns has a corresponding mqueuefs superblock. When a user does
clone(CLONE_NEWIPC) or unshare(CLONE_NEWIPC), the unshare will cause an
internal mount of a new mqueuefs sb linked to the new ipc ns.
When a user does 'mount -t mqueue mqueue /dev/mqueue', he mounts the
mqueuefs superblock.
Posix message queues can be worked with both through the mq_* system calls
(see mq_overview(7)), and through the VFS through the mqueue mount. Any
usage of mq_open() and friends will work with the acting task's ipc
namespace. Any actions through the VFS will work with the mqueuefs in
which the file was created. So if a user doesn't remount mqueuefs after
unshare(CLONE_NEWIPC), mq_open("/ab") will not be reflected in "ls
/dev/mqueue".
If task a mounts mqueue for ipc_ns:1, then clones task b with a new ipcns,
ipcns:2, and then task a is the last task in ipc_ns:1 to exit, then (1)
ipc_ns:1 will be freed, (2) it's superblock will live on until task b
umounts the corresponding mqueuefs, and vfs actions will continue to
succeed, but (3) sb->s_fs_info will be NULL for the sb corresponding to
the deceased ipc_ns:1.
To make this happen, we must protect the ipc reference count when
a) a task exits and drops its ipcns->count, since it might be dropping
it to 0 and freeing the ipcns
b) a task accesses the ipcns through its mqueuefs interface, since it
bumps the ipcns refcount and might race with the last task in the ipcns
exiting.
So the kref is changed to an atomic_t so we can use
atomic_dec_and_lock(&ns->count,mq_lock), and every access to the ipcns
through ns = mqueuefs_sb->s_fs_info is protected by the same lock.
Signed-off-by: Cedric Le Goater <clg@fr.ibm.com>
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-07 06:01:10 +04:00
/*
* put_ipc_ns - drop a reference to an ipc namespace .
* @ ns : the namespace to put
*
* If this is the last task in the namespace exiting , and
* it is dropping the refcount to 0 , then it can race with
* a task in another ipc namespace but in a mounts namespace
* which has this ipcns ' s mqueuefs mounted , doing some action
* with one of the mqueuefs files . That can raise the refcount .
* So dropping the refcount , and raising the refcount when
* accessing it through the VFS , are protected with mq_lock .
*
* ( Clearly , a task raising the refcount on its own ipc_ns
* needn ' t take mq_lock since it can ' t race with the last task
* in the ipcns exiting ) .
*/
void put_ipc_ns ( struct ipc_namespace * ns )
2008-02-08 15:18:22 +03:00
{
namespaces: ipc namespaces: implement support for posix msqueues
Implement multiple mounts of the mqueue file system, and link it to usage
of CLONE_NEWIPC.
Each ipc ns has a corresponding mqueuefs superblock. When a user does
clone(CLONE_NEWIPC) or unshare(CLONE_NEWIPC), the unshare will cause an
internal mount of a new mqueuefs sb linked to the new ipc ns.
When a user does 'mount -t mqueue mqueue /dev/mqueue', he mounts the
mqueuefs superblock.
Posix message queues can be worked with both through the mq_* system calls
(see mq_overview(7)), and through the VFS through the mqueue mount. Any
usage of mq_open() and friends will work with the acting task's ipc
namespace. Any actions through the VFS will work with the mqueuefs in
which the file was created. So if a user doesn't remount mqueuefs after
unshare(CLONE_NEWIPC), mq_open("/ab") will not be reflected in "ls
/dev/mqueue".
If task a mounts mqueue for ipc_ns:1, then clones task b with a new ipcns,
ipcns:2, and then task a is the last task in ipc_ns:1 to exit, then (1)
ipc_ns:1 will be freed, (2) it's superblock will live on until task b
umounts the corresponding mqueuefs, and vfs actions will continue to
succeed, but (3) sb->s_fs_info will be NULL for the sb corresponding to
the deceased ipc_ns:1.
To make this happen, we must protect the ipc reference count when
a) a task exits and drops its ipcns->count, since it might be dropping
it to 0 and freeing the ipcns
b) a task accesses the ipcns through its mqueuefs interface, since it
bumps the ipcns refcount and might race with the last task in the ipcns
exiting.
So the kref is changed to an atomic_t so we can use
atomic_dec_and_lock(&ns->count,mq_lock), and every access to the ipcns
through ns = mqueuefs_sb->s_fs_info is protected by the same lock.
Signed-off-by: Cedric Le Goater <clg@fr.ibm.com>
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-07 06:01:10 +04:00
if ( atomic_dec_and_lock ( & ns - > count , & mq_lock ) ) {
mq_clear_sbinfo ( ns ) ;
spin_unlock ( & mq_lock ) ;
mq_put_mnt ( ns ) ;
free_ipc_ns ( ns ) ;
}
}
2010-03-08 05:48:39 +03:00
2014-11-01 07:25:30 +03:00
/* Map the embedded ns_common back to its containing ipc_namespace. */
static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
{
	return container_of(ns, struct ipc_namespace, ns);
}
2014-11-01 07:37:32 +03:00
static struct ns_common * ipcns_get ( struct task_struct * task )
2010-03-08 05:48:39 +03:00
{
struct ipc_namespace * ns = NULL ;
struct nsproxy * nsproxy ;
2014-02-04 07:13:49 +04:00
task_lock ( task ) ;
nsproxy = task - > nsproxy ;
2010-03-08 05:48:39 +03:00
if ( nsproxy )
ns = get_ipc_ns ( nsproxy - > ipc_ns ) ;
2014-02-04 07:13:49 +04:00
task_unlock ( task ) ;
2010-03-08 05:48:39 +03:00
2014-11-01 07:25:30 +03:00
return ns ? & ns - > ns : NULL ;
2010-03-08 05:48:39 +03:00
}
2014-11-01 07:37:32 +03:00
/* proc_ns_operations.put: drop a reference taken by ipcns_get(). */
static void ipcns_put(struct ns_common *ns)
{
	return put_ipc_ns(to_ipc_ns(ns));
}
2014-11-01 07:37:32 +03:00
static int ipcns_install ( struct nsproxy * nsproxy , struct ns_common * new )
2010-03-08 05:48:39 +03:00
{
2014-11-01 07:25:30 +03:00
struct ipc_namespace * ns = to_ipc_ns ( new ) ;
2012-12-14 19:55:36 +04:00
if ( ! ns_capable ( ns - > user_ns , CAP_SYS_ADMIN ) | |
2013-03-20 23:49:49 +04:00
! ns_capable ( current_user_ns ( ) , CAP_SYS_ADMIN ) )
2012-07-26 12:13:20 +04:00
return - EPERM ;
2010-03-08 05:48:39 +03:00
/* Ditch state from the old ipc namespace */
exit_sem ( current ) ;
put_ipc_ns ( nsproxy - > ipc_ns ) ;
nsproxy - > ipc_ns = get_ipc_ns ( ns ) ;
return 0 ;
}
2016-09-06 10:47:13 +03:00
static struct user_namespace * ipcns_owner ( struct ns_common * ns )
{
return to_ipc_ns ( ns ) - > user_ns ;
}
2010-03-08 05:48:39 +03:00
const struct proc_ns_operations ipcns_operations = {
. name = " ipc " ,
. type = CLONE_NEWIPC ,
. get = ipcns_get ,
. put = ipcns_put ,
. install = ipcns_install ,
2016-09-06 10:47:13 +03:00
. owner = ipcns_owner ,
2010-03-08 05:48:39 +03:00
} ;