linux/net/socket.c
/*
* NET An implementation of the SOCKET network access protocol.
*
* Version: @(#)socket.c 1.1.93 18/02/95
*
* Authors: Orest Zborowski, <obz@Kodak.COM>
* Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Fixes:
* Anonymous : NOTSOCK/BADF cleanup. Error fix in
* shutdown()
* Alan Cox : verify_area() fixes
* Alan Cox : Removed DDI
* Jonathan Kamens : SOCK_DGRAM reconnect bug
* Alan Cox : Moved a load of checks to the very
* top level.
* Alan Cox : Move address structures to/from user
* mode above the protocol layers.
* Rob Janssen : Allow 0 length sends.
* Alan Cox : Asynchronous I/O support (cribbed from the
* tty drivers).
* Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style)
* Jeff Uphoff : Made max number of sockets command-line
* configurable.
* Matti Aarnio : Made the number of sockets dynamic,
* to be allocated when needed, and mr.
* Uphoff's max is used as max to be
* allowed to allocate.
* Linus : Argh. removed all the socket allocation
* altogether: it's in the inode now.
* Alan Cox : Made sock_alloc()/sock_release() public
* for NetROM and future kernel nfsd type
* stuff.
* Alan Cox : sendmsg/recvmsg basics.
* Tom Dyas : Export net symbols.
* Marcin Dalecki : Fixed problems with CONFIG_NET="n".
* Alan Cox : Added thread locking to sys_* calls
* for sockets. May have errors at the
* moment.
* Kevin Buhr : Fixed the dumb errors in the above.
* Andi Kleen : Some small cleanups, optimizations,
* and fixed a copy_from_user() bug.
* Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0)
* Tigran Aivazian : Made listen(2) backlog sanity checks
* protocol-independent
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*
* This module is effectively the top level interface to the BSD socket
* paradigm.
*
* Based upon Swansea University Computer Society NET3.039
*/
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/wanrouter.h>
#include <linux/if_bridge.h>
#include <linux/if_frad.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kmod.h>
#include <linux/audit.h>
#include <linux/wireless.h>
#include <linux/nsproxy.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <net/compat.h>
#include <net/wext.h>
#include <net/sock.h>
#include <linux/netfilter.h>
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
static unsigned int sock_poll(struct file *file,
struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
#endif
static int sock_fasync(int fd, struct file *filp, int on);
static ssize_t sock_sendpage(struct file *file, struct page *page,
int offset, size_t size, loff_t *ppos, int more);
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
/*
* Socket files have a set of 'special' operations as well as the generic
* file ones. These don't appear in the operation structures but are done
* directly via the socketcall() multiplexor.
*/
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.aio_read = sock_aio_read,
.aio_write = sock_aio_write,
.poll = sock_poll,
.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_sock_ioctl,
#endif
.mmap = sock_mmap,
.open = sock_no_open, /* special open code to disallow open via /proc */
.release = sock_close,
.fasync = sock_fasync,
.sendpage = sock_sendpage,
.splice_write = generic_splice_sendpage,
.splice_read = sock_splice_read,
};
/*
* The protocol list. Each protocol is registered in here.
*/
static DEFINE_SPINLOCK(net_family_lock);
static const struct net_proto_family *net_families[NPROTO] __read_mostly;
/*
* Statistics counters of the socket lists
*/
static DEFINE_PER_CPU(int, sockets_in_use) = 0;
/*
* Support routines.
* Move socket addresses back and forth across the kernel/user
* divide and look after the messy bits.
*/
#define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
16 for IP, 16 for IPX,
24 for IPv6,
about 80 for AX.25
must be at least one bigger than
the AF_UNIX size (see net/unix/af_unix.c
:unix_mkname()).
*/
/**
* move_addr_to_kernel - copy a socket address into kernel space
* @uaddr: Address in user space
* @kaddr: Address in kernel space
* @ulen: Length in user space
*
* The address is copied into kernel space. If the provided address is
* too long an error code of -EINVAL is returned. If the copy gives
* invalid addresses -EFAULT is returned. On a success 0 is returned.
*/
int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr)
{
if (ulen < 0 || ulen > MAX_SOCK_ADDR)
return -EINVAL;
if (ulen == 0)
return 0;
if (copy_from_user(kaddr, uaddr, ulen))
return -EFAULT;
return audit_sockaddr(ulen, kaddr);
}
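/*
 * Illustrative sketch, not part of the original file: roughly the pattern
 * the bind()/connect() syscalls later in this file use with
 * move_addr_to_kernel().  The user sockaddr is copied into a
 * MAX_SOCK_ADDR-sized kernel buffer before the protocol ever sees it.
 * example_bind() is a hypothetical helper name.
 */
static int example_bind(struct socket *sock, struct sockaddr __user *umyaddr,
			int addrlen)
{
	char address[MAX_SOCK_ADDR];	/* scratch sockaddr in kernel space */
	int err;

	err = move_addr_to_kernel(umyaddr, addrlen, address);
	if (err < 0)
		return err;

	return sock->ops->bind(sock, (struct sockaddr *)address, addrlen);
}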
/**
* move_addr_to_user - copy an address to user space
* @kaddr: kernel space address
* @klen: length of address in kernel
* @uaddr: user space address
* @ulen: pointer to user length field
*
* The value pointed to by ulen on entry is the buffer length available.
* This is overwritten with the buffer space used. -EINVAL is returned
* if an overlong or negative buffer length is specified. -EFAULT
* is returned if either the buffer or the length field is not
* accessible.
* After copying the data up to the limit the user specifies, the true
* length of the data is written over the length limit the user
* specified. Zero is returned for a success.
*/
int move_addr_to_user(void *kaddr, int klen, void __user *uaddr,
int __user *ulen)
{
int err;
int len;
err = get_user(len, ulen);
if (err)
return err;
if (len > klen)
len = klen;
if (len < 0 || len > MAX_SOCK_ADDR)
return -EINVAL;
if (len) {
if (audit_sockaddr(klen, kaddr))
return -ENOMEM;
if (copy_to_user(uaddr, kaddr, len))
return -EFAULT;
}
/*
* "fromlen shall refer to the value before truncation.."
* 1003.1g
*/
return __put_user(klen, ulen);
}
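/*
 * Illustrative sketch, not part of the original file: the matching
 * outbound pattern, as used by getsockname()/getpeername() further down.
 * The protocol fills a kernel buffer and move_addr_to_user() copies it
 * out, truncating to the user-supplied length while reporting the true
 * length.  example_getname() is a hypothetical helper name.
 */
static int example_getname(struct socket *sock,
			   struct sockaddr __user *usockaddr,
			   int __user *usockaddr_len)
{
	char address[MAX_SOCK_ADDR];
	int len, err;

	err = sock->ops->getname(sock, (struct sockaddr *)address, &len, 0);
	if (err)
		return err;

	return move_addr_to_user(address, len, usockaddr, usockaddr_len);
}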
#define SOCKFS_MAGIC 0x534F434B
static struct kmem_cache *sock_inode_cachep __read_mostly;
static struct inode *sock_alloc_inode(struct super_block *sb)
{
struct socket_alloc *ei;
ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
init_waitqueue_head(&ei->socket.wait);
ei->socket.fasync_list = NULL;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
ei->socket.ops = NULL;
ei->socket.sk = NULL;
ei->socket.file = NULL;
return &ei->vfs_inode;
}
static void sock_destroy_inode(struct inode *inode)
{
kmem_cache_free(sock_inode_cachep,
container_of(inode, struct socket_alloc, vfs_inode));
}
static void init_once(struct kmem_cache *cachep, void *foo)
{
struct socket_alloc *ei = (struct socket_alloc *)foo;
inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
{
sock_inode_cachep = kmem_cache_create("sock_inode_cache",
sizeof(struct socket_alloc),
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD),
init_once);
if (sock_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static struct super_operations sockfs_ops = {
.alloc_inode = sock_alloc_inode,
.destroy_inode = sock_destroy_inode,
.statfs = simple_statfs,
};
static int sockfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
struct vfsmount *mnt)
{
return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
mnt);
}
static struct vfsmount *sock_mnt __read_mostly;
static struct file_system_type sock_fs_type = {
.name = "sockfs",
.get_sb = sockfs_get_sb,
.kill_sb = kill_anon_super,
};
static int sockfs_delete_dentry(struct dentry *dentry)
{
/*
* At creation time, we pretended this dentry was hashed
* (by clearing DCACHE_UNHASHED bit in d_flags)
* At delete time, we restore the truth : not hashed.
* (so that dput() can proceed correctly)
*/
dentry->d_flags |= DCACHE_UNHASHED;
return 0;
}
/*
* sockfs_dname() is called from d_path().
*/
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
dentry->d_inode->i_ino);
}
static struct dentry_operations sockfs_dentry_operations = {
.d_delete = sockfs_delete_dentry,
.d_dname = sockfs_dname,
};
/*
* Obtains the first available file descriptor and sets it up for use.
*
* These functions create file structures and map them into the fd space
* of the current process. On success the file descriptor is returned,
* and the file struct is implicitly stored in sock->file.
* Note that another thread may close the file descriptor before we return
* from this function. We rely on the fact that we do not refer to the
* socket after mapping; if that ever changes, this function will need to
* take an extra reference on the file.
*
* In any case the returned fd may already be invalid!
* This race condition is unavoidable with shared fd spaces; we cannot
* solve it inside the kernel, but we do maintain internal coherence.
*/
static int sock_alloc_fd(struct file **filep)
{
int fd;
fd = get_unused_fd();
if (likely(fd >= 0)) {
struct file *file = get_empty_filp();
*filep = file;
if (unlikely(!file)) {
put_unused_fd(fd);
return -ENFILE;
}
} else
*filep = NULL;
return fd;
}
static int sock_attach_fd(struct socket *sock, struct file *file)
{
struct dentry *dentry;
struct qstr name = { .name = "" };
dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
if (unlikely(!dentry))
return -ENOMEM;
dentry->d_op = &sockfs_dentry_operations;
/*
* We don't want to push this dentry into the global dentry hash table.
* We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
* This permits a working /proc/$pid/fd/XXX on sockets
*/
dentry->d_flags &= ~DCACHE_UNHASHED;
d_instantiate(dentry, SOCK_INODE(sock));
sock->file = file;
init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
SOCK_INODE(sock)->i_fop = &socket_file_ops;
file->f_flags = O_RDWR;
file->f_pos = 0;
file->private_data = sock;
return 0;
}
int sock_map_fd(struct socket *sock)
{
struct file *newfile;
int fd = sock_alloc_fd(&newfile);
if (likely(fd >= 0)) {
int err = sock_attach_fd(sock, newfile);
if (unlikely(err < 0)) {
put_filp(newfile);
put_unused_fd(fd);
return err;
}
fd_install(fd, newfile);
}
return fd;
}
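/*
 * Illustrative sketch, not part of the original file: roughly how
 * sys_socket() later in this file combines sock_create() with
 * sock_map_fd().  If the fd/file setup fails, no fd has taken ownership
 * of the socket, so the caller must release it itself.  example_socket()
 * is a hypothetical helper name.
 */
static int example_socket(int family, int type, int protocol)
{
	struct socket *sock;
	int retval;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		return retval;

	retval = sock_map_fd(sock);	/* allocates the fd and the file */
	if (retval < 0)
		sock_release(sock);	/* no fd took ownership; drop it */

	return retval;
}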
static struct socket *sock_from_file(struct file *file, int *err)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */
*err = -ENOTSOCK;
return NULL;
}
/**
* sockfd_lookup - Go from a file number to its socket slot
* @fd: file handle
* @err: pointer to an error code return
*
* The file handle passed in is locked and the socket it is bound
* to is returned. If an error occurs the err pointer is overwritten
* with a negative errno code and NULL is returned. The function checks
* for both invalid handles and passing a handle which is not a socket.
*
* On a success the socket object pointer is returned.
*/
struct socket *sockfd_lookup(int fd, int *err)
{
struct file *file;
struct socket *sock;
file = fget(fd);
if (!file) {
*err = -EBADF;
return NULL;
}
sock = sock_from_file(file, err);
if (!sock)
fput(file);
return sock;
}
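/*
 * Illustrative sketch, not part of the original file: the usual
 * lookup/use/release pattern around sockfd_lookup().  The reference
 * taken by fget() must be dropped with fput() on sock->file once the
 * caller is finished.  example_shutdown() is a hypothetical helper name.
 */
static int example_shutdown(int fd, int how)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;	/* -EBADF or -ENOTSOCK, set by the lookup */

	err = sock->ops->shutdown(sock, how);
	fput(sock->file);	/* release the reference from fget() */
	return err;
}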
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
struct file *file;
struct socket *sock;
*err = -EBADF;
file = fget_light(fd, fput_needed);
if (file) {
sock = sock_from_file(file, err);
if (sock)
return sock;
fput_light(file, *fput_needed);
}
return NULL;
}
/**
* sock_alloc - allocate a socket
*
* Allocate a new inode and socket object. The two are bound together
* and initialised. The socket is then returned. If we are out of inodes
* NULL is returned.
*/
static struct socket *sock_alloc(void)
{
struct inode *inode;
struct socket *sock;
inode = new_inode(sock_mnt->mnt_sb);
if (!inode)
return NULL;
sock = SOCKET_I(inode);
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
get_cpu_var(sockets_in_use)++;
put_cpu_var(sockets_in_use);
return sock;
}
/*
* In theory you can't get an open on this inode, but /proc provides
* a back door. Remember to keep it shut otherwise you'll let the
* creepy crawlies in.
*/
static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
{
return -ENXIO;
}
const struct file_operations bad_sock_fops = {
.owner = THIS_MODULE,
.open = sock_no_open,
};
/**
* sock_release - close a socket
* @sock: socket to close
*
* The socket is released from the protocol stack if it has a release
* callback, and the inode is then released if the socket is bound to
* an inode not a file.
*/
void sock_release(struct socket *sock)
{
if (sock->ops) {
struct module *owner = sock->ops->owner;
sock->ops->release(sock);
sock->ops = NULL;
module_put(owner);
}
if (sock->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
get_cpu_var(sockets_in_use)--;
put_cpu_var(sockets_in_use);
if (!sock->file) {
iput(SOCK_INODE(sock));
return;
}
sock->file = NULL;
}
static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size)
{
struct sock_iocb *si = kiocb_to_siocb(iocb);
int err;
si->sock = sock;
si->scm = NULL;
si->msg = msg;
si->size = size;
err = security_socket_sendmsg(sock, msg, size);
if (err)
return err;
return sock->ops->sendmsg(iocb, sock, msg, size);
}
int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct kiocb iocb;
struct sock_iocb siocb;
int ret;
init_sync_kiocb(&iocb, NULL);
iocb.private = &siocb;
ret = __sock_sendmsg(&iocb, sock, msg, size);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
return ret;
}
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
mm_segment_t oldfs = get_fs();
int result;
set_fs(KERNEL_DS);
/*
* the following is safe, since to the compiler the definitions of kvec
* and iovec are identical, yielding the same in-core layout and alignment
*/
msg->msg_iov = (struct iovec *)vec;
msg->msg_iovlen = num;
result = sock_sendmsg(sock, msg, size);
set_fs(oldfs);
return result;
}
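/*
 * Illustrative sketch, not part of the original file: how an in-kernel
 * caller (e.g. a network filesystem) might push a buffer through
 * kernel_sendmsg() with a single kvec segment.  example_send_buf() and
 * the MSG_DONTWAIT flag are assumptions made for the example only.
 */
static int example_send_buf(struct socket *sock, void *buf, size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec vec = { .iov_base = buf, .iov_len = len };

	/* one segment; kernel_sendmsg() handles the KERNEL_DS switch */
	return kernel_sendmsg(sock, &msg, &vec, 1, len);
}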
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
ktime_t kt = skb->tstamp;
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
struct timeval tv;
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
if (kt.tv64 == 0)
kt = ktime_get_real();
skb->tstamp = kt;
tv = ktime_to_timeval(kt);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv);
} else {
struct timespec ts;
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
if (kt.tv64 == 0)
kt = ktime_get_real();
skb->tstamp = kt;
ts = ktime_to_timespec(kt);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts), &ts);
}
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
int err;
struct sock_iocb *si = kiocb_to_siocb(iocb);
si->sock = sock;
si->scm = NULL;
si->msg = msg;
si->size = size;
si->flags = flags;
err = security_socket_recvmsg(sock, msg, size, flags);
if (err)
return err;
return sock->ops->recvmsg(iocb, sock, msg, size, flags);
}
int sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct kiocb iocb;
struct sock_iocb siocb;
int ret;
init_sync_kiocb(&iocb, NULL);
iocb.private = &siocb;
ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
return ret;
}
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size, int flags)
{
mm_segment_t oldfs = get_fs();
int result;
set_fs(KERNEL_DS);
/*
* the following is safe, since to the compiler the definitions of kvec
* and iovec are identical, yielding the same in-core layout and alignment
*/
msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num;
result = sock_recvmsg(sock, msg, size, flags);
set_fs(oldfs);
return result;
}
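/*
 * Illustrative sketch, not part of the original file: the receive-side
 * counterpart, filling a kernel buffer via kernel_recvmsg().  Status
 * bits such as MSG_TRUNC come back in msg.msg_flags.  example_recv_buf()
 * and the MSG_WAITALL flag are assumptions made for the example only.
 */
static int example_recv_buf(struct socket *sock, void *buf, size_t len)
{
	struct msghdr msg = { .msg_flags = 0 };
	struct kvec vec = { .iov_base = buf, .iov_len = len };

	/* the requested receive flags go in the final argument */
	return kernel_recvmsg(sock, &msg, &vec, 1, len, MSG_WAITALL);
}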
static void sock_aio_dtor(struct kiocb *iocb)
{
kfree(iocb->private);
}
static ssize_t sock_sendpage(struct file *file, struct page *page,
int offset, size_t size, loff_t *ppos, int more)
{
struct socket *sock;
int flags;
sock = file->private_data;
flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
if (more)
flags |= MSG_MORE;
return sock->ops->sendpage(sock, page, offset, size, flags);
}
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct socket *sock = file->private_data;
if (unlikely(!sock->ops->splice_read))
return -EINVAL;
return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
struct sock_iocb *siocb)
{
if (!is_sync_kiocb(iocb)) {
siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
if (!siocb)
return NULL;
iocb->ki_dtor = sock_aio_dtor;
}
siocb->kiocb = iocb;
iocb->private = siocb;
return siocb;
}
static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
struct file *file, const struct iovec *iov,
unsigned long nr_segs)
{
struct socket *sock = file->private_data;
size_t size = 0;
int i;
for (i = 0; i < nr_segs; i++)
size += iov[i].iov_len;
msg->msg_name = NULL;
msg->msg_namelen = 0;
msg->msg_control = NULL;
msg->msg_controllen = 0;
msg->msg_iov = (struct iovec *)iov;
msg->msg_iovlen = nr_segs;
msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
}
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct sock_iocb siocb, *x;
if (pos != 0)
return -ESPIPE;
if (iocb->ki_left == 0) /* Match SYS5 behaviour */
return 0;
x = alloc_sock_iocb(iocb, &siocb);
if (!x)
return -ENOMEM;
return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}
static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
struct file *file, const struct iovec *iov,
unsigned long nr_segs)
{
struct socket *sock = file->private_data;
size_t size = 0;
int i;
for (i = 0; i < nr_segs; i++)
size += iov[i].iov_len;
msg->msg_name = NULL;
msg->msg_namelen = 0;
msg->msg_control = NULL;
msg->msg_controllen = 0;
msg->msg_iov = (struct iovec *)iov;
msg->msg_iovlen = nr_segs;
msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
if (sock->type == SOCK_SEQPACKET)
msg->msg_flags |= MSG_EOR;
return __sock_sendmsg(iocb, sock, msg, size);
}
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct sock_iocb siocb, *x;
if (pos != 0)
return -ESPIPE;
x = alloc_sock_iocb(iocb, &siocb);
if (!x)
return -ENOMEM;
return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}
/*
* Atomic setting of ioctl hooks to avoid race
* with module unload.
*/
static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
mutex_lock(&br_ioctl_mutex);
br_ioctl_hook = hook;
mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);
static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);
void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
mutex_lock(&vlan_ioctl_mutex);
vlan_ioctl_hook = hook;
mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);
static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);
void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
mutex_lock(&dlci_ioctl_mutex);
dlci_ioctl_hook = hook;
mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);
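/*
 * Illustrative sketch, not part of the original file: how a module such
 * as the 802.1Q VLAN driver uses the setters above.  The hook is
 * installed at module init and must be cleared again on exit so that no
 * stale pointer is left behind after unload; the mutex taken by the
 * setter serialises this against a concurrent ioctl.
 * example_vlan_ioctl(), example_init() and example_exit() are
 * hypothetical names.
 */
static int example_vlan_ioctl(struct net *net, void __user *arg)
{
	return -ENOIOCTLCMD;	/* a real handler would decode arg here */
}

static int __init example_init(void)
{
	vlan_ioctl_set(example_vlan_ioctl);
	return 0;
}

static void __exit example_exit(void)
{
	vlan_ioctl_set(NULL);	/* unhook before the module text goes away */
}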
/*
* With an ioctl, arg may well be a user mode pointer, but we don't know
* what to do with it - that's up to the protocol still.
*/
static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
struct sock *sk;
void __user *argp = (void __user *)arg;
int pid, err;
struct net *net;
sock = file->private_data;
sk = sock->sk;
net = sock_net(sk);
if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
err = dev_ioctl(net, cmd, argp);
} else
#ifdef CONFIG_WIRELESS_EXT
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
err = dev_ioctl(net, cmd, argp);
} else
#endif /* CONFIG_WIRELESS_EXT */
switch (cmd) {
case FIOSETOWN:
case SIOCSPGRP:
err = -EFAULT;
if (get_user(pid, (int __user *)argp))
break;
err = f_setown(sock->file, pid, 1);
break;
case FIOGETOWN:
case SIOCGPGRP:
err = put_user(f_getown(sock->file),
(int __user *)argp);
break;
case SIOCGIFBR:
case SIOCSIFBR:
case SIOCBRADDBR:
case SIOCBRDELBR:
err = -ENOPKG;
if (!br_ioctl_hook)
request_module("bridge");
mutex_lock(&br_ioctl_mutex);
if (br_ioctl_hook)
err = br_ioctl_hook(net, cmd, argp);
mutex_unlock(&br_ioctl_mutex);
break;
case SIOCGIFVLAN:
case SIOCSIFVLAN:
err = -ENOPKG;
if (!vlan_ioctl_hook)
request_module("8021q");
mutex_lock(&vlan_ioctl_mutex);
if (vlan_ioctl_hook)
err = vlan_ioctl_hook(net, argp);
mutex_unlock(&vlan_ioctl_mutex);
break;
case SIOCADDDLCI:
case SIOCDELDLCI:
err = -ENOPKG;
if (!dlci_ioctl_hook)
request_module("dlci");
mutex_lock(&dlci_ioctl_mutex);
if (dlci_ioctl_hook)
err = dlci_ioctl_hook(cmd, argp);
mutex_unlock(&dlci_ioctl_mutex);
break;
default:
err = sock->ops->ioctl(sock, cmd, arg);
/*
* If this ioctl is unknown try to hand it down
* to the NIC driver.
*/
if (err == -ENOIOCTLCMD)
err = dev_ioctl(net, cmd, argp);
break;
}
return err;
}
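/*
 * Illustrative sketch (not part of this file, assumes a valid socket
 * descriptor "fd"): the FIOSETOWN/FIOGETOWN branch above is what services
 * the classic user-space SIGIO ownership calls:
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int owner = getpid();
 *	ioctl(fd, FIOSETOWN, &owner);	// routed to f_setown() above
 *	int back;
 *	ioctl(fd, FIOGETOWN, &back);	// back == getpid()
 */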
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
int err;
struct socket *sock = NULL;
err = security_socket_create(family, type, protocol, 1);
if (err)
goto out;
sock = sock_alloc();
if (!sock) {
err = -ENOMEM;
goto out;
}
sock->type = type;
err = security_socket_post_create(sock, family, type, protocol, 1);
if (err)
goto out_release;
out:
*res = sock;
return err;
out_release:
sock_release(sock);
sock = NULL;
goto out;
}
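/*
 * Illustrative sketch (not part of this file): sock_create_lite() is meant
 * for callers that already own a protocol-level struct sock and only need
 * the thin struct socket wrapper around it; "my_sk" and "my_proto_ops" are
 * hypothetical names:
 *
 *	struct socket *sock;
 *	int err = sock_create_lite(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
 *	if (!err) {
 *		sock->ops = &my_proto_ops;	// assumed proto_ops table
 *		sock_graft(my_sk, sock);	// tie the existing sock to it
 *	}
 */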
/* No kernel lock held - perfect */
static unsigned int sock_poll(struct file *file, poll_table *wait)
{
struct socket *sock;
/*
* We can't return errors to poll, so it's either yes or no.
*/
sock = file->private_data;
return sock->ops->poll(file, sock, wait);
}
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
struct socket *sock = file->private_data;
return sock->ops->mmap(file, sock, vma);
}
static int sock_close(struct inode *inode, struct file *filp)
{
/*
 * It is possible the inode is NULL if we were
 * closing an unfinished socket.
*/
if (!inode) {
printk(KERN_DEBUG "sock_close: NULL inode\n");
return 0;
}
sock_fasync(-1, filp, 0);
sock_release(SOCKET_I(inode));
return 0;
}
/*
* Update the socket async list
*
* Fasync_list locking strategy.
*
* 1. fasync_list is modified only under process context socket lock
* i.e. under semaphore.
* 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
* or under socket lock.
 * 3. fasync_list can be used from softirq context, so
 *    modifications under the socket lock also have to take
 *    write_lock_bh(&sk->sk_callback_lock).
* --ANK (990710)
*/
static int sock_fasync(int fd, struct file *filp, int on)
{
struct fasync_struct *fa, *fna = NULL, **prev;
struct socket *sock;
struct sock *sk;
if (on) {
fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
if (fna == NULL)
return -ENOMEM;
}
sock = filp->private_data;
sk = sock->sk;
if (sk == NULL) {
kfree(fna);
return -EINVAL;
}
lock_sock(sk);
prev = &(sock->fasync_list);
for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
if (fa->fa_file == filp)
break;
if (on) {
if (fa != NULL) {
write_lock_bh(&sk->sk_callback_lock);
fa->fa_fd = fd;
write_unlock_bh(&sk->sk_callback_lock);
kfree(fna);
goto out;
}
fna->fa_file = filp;
fna->fa_fd = fd;
fna->magic = FASYNC_MAGIC;
fna->fa_next = sock->fasync_list;
write_lock_bh(&sk->sk_callback_lock);
sock->fasync_list = fna;
write_unlock_bh(&sk->sk_callback_lock);
} else {
if (fa != NULL) {
write_lock_bh(&sk->sk_callback_lock);
*prev = fa->fa_next;
write_unlock_bh(&sk->sk_callback_lock);
kfree(fa);
}
}
out:
release_sock(sock->sk);
return 0;
}
/* This function may be called only under socket lock or callback_lock */
int sock_wake_async(struct socket *sock, int how, int band)
{
if (!sock || !sock->fasync_list)
return -1;
switch (how) {
case SOCK_WAKE_WAITD:
if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
break;
goto call_kill;
case SOCK_WAKE_SPACE:
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
break;
/* fall through */
case SOCK_WAKE_IO:
call_kill:
__kill_fasync(sock->fasync_list, SIGIO, band);
break;
case SOCK_WAKE_URG:
__kill_fasync(sock->fasync_list, SIGURG, band);
}
return 0;
}
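/*
 * Illustrative sketch (not part of this file): the fasync list above is
 * populated when user space requests SIGIO delivery, e.g.:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	fcntl(fd, F_SETOWN, getpid());		// who receives the signal
 *	int fl = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, fl | O_ASYNC);	// ends up in sock_fasync(..., 1)
 *
 * After that, sock_wake_async() raises SIGIO when data or buffer space
 * becomes available, and SIGURG for out-of-band data.
 */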
static int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
struct socket *sock;
const struct net_proto_family *pf;
/*
* Check protocol is in range
*/
if (family < 0 || family >= NPROTO)
return -EAFNOSUPPORT;
if (type < 0 || type >= SOCK_MAX)
return -EINVAL;
/* Compatibility.
This uglymoron is moved from INET layer to here to avoid
deadlock in module load.
*/
if (family == PF_INET && type == SOCK_PACKET) {
static int warned;
if (!warned) {
warned = 1;
printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
current->comm);
}
family = PF_PACKET;
}
err = security_socket_create(family, type, protocol, kern);
if (err)
return err;
/*
* Allocate the socket and allow the family to set things up. if
* the protocol is 0, the family is instructed to select an appropriate
* default.
*/
sock = sock_alloc();
if (!sock) {
if (net_ratelimit())
printk(KERN_WARNING "socket: no more sockets\n");
return -ENFILE;	/* Not exactly a match, but it's the
			   closest POSIX thing */
}
sock->type = type;
#if defined(CONFIG_KMOD)
/* Attempt to load a protocol module if the find failed.
*
 * 12/09/1996 Marcin: But! this really only makes sense if the user
* requested real, full-featured networking support upon configuration.
* Otherwise module support will break!
*/
if (net_families[family] == NULL)
request_module("net-pf-%d", family);
#endif
rcu_read_lock();
pf = rcu_dereference(net_families[family]);
err = -EAFNOSUPPORT;
if (!pf)
goto out_release;
/*
* We will call the ->create function, that possibly is in a loadable
* module, so we have to bump that loadable module refcnt first.
*/
if (!try_module_get(pf->owner))
goto out_release;
/* Now protected by module ref count */
rcu_read_unlock();
err = pf->create(net, sock, protocol);
if (err < 0)
goto out_module_put;
/*
 * Now bump the refcnt of the [loadable] module that owns this
 * socket; at sock_release time we decrement its refcnt.
*/
if (!try_module_get(sock->ops->owner))
goto out_module_busy;
/*
* Now that we're done with the ->create function, the [loadable]
* module can have its refcnt decremented
*/
module_put(pf->owner);
err = security_socket_post_create(sock, family, type, protocol, kern);
if (err)
goto out_sock_release;
*res = sock;
return 0;
out_module_busy:
err = -EAFNOSUPPORT;
out_module_put:
sock->ops = NULL;
module_put(pf->owner);
out_sock_release:
sock_release(sock);
return err;
out_release:
rcu_read_unlock();
goto out_sock_release;
}
int sock_create(int family, int type, int protocol, struct socket **res)
{
return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
return __sock_create(&init_net, family, type, protocol, res, 1);
}
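/*
 * Illustrative sketch (not part of this file): an in-kernel user (e.g. a
 * tunnelling driver or nfsd-style service) would typically pair these
 * helpers as follows; error handling is abbreviated:
 *
 *	struct socket *sock;
 *	int err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 *	if (err < 0)
 *		return err;
 *	// ... use kernel_sendmsg()/kernel_recvmsg() on sock ...
 *	sock_release(sock);	// drops the inode and the module reference
 */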
asmlinkage long sys_socket(int family, int type, int protocol)
{
int retval;
struct socket *sock;
retval = sock_create(family, type, protocol, &sock);
if (retval < 0)
goto out;
retval = sock_map_fd(sock);
if (retval < 0)
goto out_release;
out:
/* It may already be another descriptor 8) Not a kernel problem. */
return retval;
out_release:
sock_release(sock);
return retval;
}
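/*
 * Illustrative sketch (not part of this file): the user-space counterpart of
 * the syscall above.  Passing protocol 0 lets the address family pick its
 * default, as described in __sock_create():
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);	// TCP by default
 *	if (fd < 0)
 *		perror("socket");
 */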
/*
* Create a pair of connected sockets.
*/
asmlinkage long sys_socketpair(int family, int type, int protocol,
int __user *usockvec)
{
struct socket *sock1, *sock2;
int fd1, fd2, err;
struct file *newfile1, *newfile2;
/*
* Obtain the first socket and check if the underlying protocol
* supports the socketpair call.
*/
err = sock_create(family, type, protocol, &sock1);
if (err < 0)
goto out;
err = sock_create(family, type, protocol, &sock2);
if (err < 0)
goto out_release_1;
err = sock1->ops->socketpair(sock1, sock2);
if (err < 0)
goto out_release_both;
fd1 = sock_alloc_fd(&newfile1);
if (unlikely(fd1 < 0)) {
err = fd1;
goto out_release_both;
}
fd2 = sock_alloc_fd(&newfile2);
if (unlikely(fd2 < 0)) {
err = fd2;
put_filp(newfile1);
put_unused_fd(fd1);
goto out_release_both;
}
err = sock_attach_fd(sock1, newfile1);
if (unlikely(err < 0)) {
goto out_fd2;
}
err = sock_attach_fd(sock2, newfile2);
if (unlikely(err < 0)) {
fput(newfile1);
goto out_fd1;
}
err = audit_fd_pair(fd1, fd2);
if (err < 0) {
fput(newfile1);
fput(newfile2);
goto out_fd;
}
fd_install(fd1, newfile1);
fd_install(fd2, newfile2);
/* fd1 and fd2 may already be other descriptors.
 * Not a kernel problem.
*/
err = put_user(fd1, &usockvec[0]);
if (!err)
err = put_user(fd2, &usockvec[1]);
if (!err)
return 0;
sys_close(fd2);
sys_close(fd1);
return err;
out_release_both:
sock_release(sock2);
out_release_1:
sock_release(sock1);
out:
return err;
out_fd2:
put_filp(newfile1);
sock_release(sock1);
out_fd1:
put_filp(newfile2);
sock_release(sock2);
out_fd:
put_unused_fd(fd1);
put_unused_fd(fd2);
goto out;
}
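/*
 * Illustrative sketch (not part of this file): typical user-space use of
 * socketpair(), e.g. as a bidirectional pipe between related processes:
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int sv[2];
 *	char buf[4];
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, sizeof(buf));	// receives "ping"
 *	}
 */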
/*
* Bind a name to a socket. Nothing much to do here since it's
* the protocol's responsibility to handle the local address.
*
* We move the socket address to kernel space before we call
* the protocol layer (having also checked the address is ok).
*/
asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
int err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
err = move_addr_to_kernel(umyaddr, addrlen, address);
if (err >= 0) {
err = security_socket_bind(sock,
(struct sockaddr *)address,
addrlen);
if (!err)
err = sock->ops->bind(sock,
(struct sockaddr *)
address, addrlen);
}
fput_light(sock->file, fput_needed);
}
return err;
}
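/*
 * Illustrative sketch (not part of this file): the address copied in by
 * move_addr_to_kernel() above originates from a user-space call such as
 * (assumes "fd" is an AF_INET socket):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_in sin;
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *	sin.sin_port = htons(8080);
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 */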
/*
* Perform a listen. Basically, we allow the protocol to do anything
* necessary for a listen, and if that works, we mark the socket as
* ready for listening.
*/
asmlinkage long sys_listen(int fd, int backlog)
{
struct socket *sock;
int err, fput_needed;
int somaxconn;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
if ((unsigned)backlog > somaxconn)
backlog = somaxconn;
err = security_socket_listen(sock, backlog);
if (!err)
err = sock->ops->listen(sock, backlog);
fput_light(sock->file, fput_needed);
}
return err;
}
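/*
 * Illustrative sketch (not part of this file): because of the clamp above, a
 * user-space backlog larger than net.core.somaxconn is silently reduced, so
 *
 *	listen(fd, 100000);
 *
 * behaves the same as listen(fd, somaxconn) on this kernel.
 */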
/*
* For accept, we attempt to create a new socket, set up the link
* with the client, wake up the client, then return the new
* connected fd. We collect the address of the connector in kernel
* space and move it to user at the very end. This is unclean because
* we open the socket then return an error.
*
 * 1003.1g adds the ability to recvmsg() to query connection pending
 * status. We need to add that support in a way that's clean when we
 * restructure accept() as well.
*/
asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen)
{
struct socket *sock, *newsock;
struct file *newfile;
int err, len, newfd, fput_needed;
char address[MAX_SOCK_ADDR];
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = -ENFILE;
if (!(newsock = sock_alloc()))
goto out_put;
newsock->type = sock->type;
newsock->ops = sock->ops;
/*
* We don't need try_module_get here, as the listening socket (sock)
* has the protocol module (sock->ops->owner) held.
*/
__module_get(newsock->ops->owner);
newfd = sock_alloc_fd(&newfile);
if (unlikely(newfd < 0)) {
err = newfd;
sock_release(newsock);
goto out_put;
}
err = sock_attach_fd(newsock, newfile);
if (err < 0)
goto out_fd_simple;
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
err = sock->ops->accept(sock, newsock, sock->file->f_flags);
if (err < 0)
goto out_fd;
if (upeer_sockaddr) {
if (newsock->ops->getname(newsock, (struct sockaddr *)address,
&len, 2) < 0) {
err = -ECONNABORTED;
goto out_fd;
}
err = move_addr_to_user(address, len, upeer_sockaddr,
upeer_addrlen);
if (err < 0)
goto out_fd;
}
/* File flags are not inherited via accept(), unlike on other OSes. */
fd_install(newfd, newfile);
err = newfd;
security_socket_post_accept(sock, newsock);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
out_fd_simple:
sock_release(newsock);
put_filp(newfile);
put_unused_fd(newfd);
goto out_put;
out_fd:
fput(newfile);
put_unused_fd(newfd);
goto out_put;
}
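/*
 * Illustrative sketch (not part of this file): the upeer_sockaddr handling
 * above corresponds to the familiar user-space pattern:
 *
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_storage peer;
 *	socklen_t peerlen = sizeof(peer);
 *	int cfd = accept(fd, (struct sockaddr *)&peer, &peerlen);
 *	// on success, peerlen holds the real address length written back
 */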
/*
* Attempt to connect to a socket with the server address. The address
* is in user space so we verify it is OK and move it to kernel space.
*
* For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
 * break bindings.
*
* NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
* other SEQPACKET protocols that take time to connect() as it doesn't
* include the -EINPROGRESS status for such sockets.
*/
asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
int err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = move_addr_to_kernel(uservaddr, addrlen, address);
if (err < 0)
goto out_put;
err =
security_socket_connect(sock, (struct sockaddr *)address, addrlen);
if (err)
goto out_put;
err = sock->ops->connect(sock, (struct sockaddr *)address, addrlen,
sock->file->f_flags);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
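/*
 * Illustrative sketch (not part of this file): on a non-blocking socket the
 * ops->connect() call above commonly returns -EINPROGRESS, which user space
 * handles roughly like this ("sin" is a filled-in struct sockaddr_in):
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		poll(&pfd, 1, -1);	// becomes writable when connect completes
 *	}
 */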
/*
* Get the local address ('name') of a socket object. Move the obtained
* name to user space.
*/
asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
int len, err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = security_socket_getsockname(sock);
if (err)
goto out_put;
err = sock->ops->getname(sock, (struct sockaddr *)address, &len, 0);
if (err)
goto out_put;
err = move_addr_to_user(address, len, usockaddr, usockaddr_len);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
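/*
 * Illustrative sketch (not part of this file): user space commonly calls
 * getsockname() to learn the port chosen by an implicit bind:
 *
 *	#include <arpa/inet.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_in sin;
 *	socklen_t len = sizeof(sin);
 *	if (getsockname(fd, (struct sockaddr *)&sin, &len) == 0)
 *		printf("local port %u\n", ntohs(sin.sin_port));
 */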
/*
* Get the remote address ('name') of a socket object. Move the obtained
* name to user space.
*/
asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
int len, err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
err = security_socket_getpeername(sock);
if (err) {
fput_light(sock->file, fput_needed);
return err;
}
err =
sock->ops->getname(sock, (struct sockaddr *)address, &len,
1);
if (!err)
err = move_addr_to_user(address, len, usockaddr,
usockaddr_len);
fput_light(sock->file, fput_needed);
}
return err;
}
/*
* Send a datagram to a given address. We move the address into kernel
* space and check the user space data area is readable before invoking
* the protocol.
*/
asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
unsigned flags, struct sockaddr __user *addr,
int addr_len)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
int err;
struct msghdr msg;
struct iovec iov;
int fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
iov.iov_base = buff;
iov.iov_len = len;
msg.msg_name = NULL;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
if (addr) {
err = move_addr_to_kernel(addr, addr_len, address);
if (err < 0)
goto out_put;
msg.msg_name = address;
msg.msg_namelen = addr_len;
}
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
err = sock_sendmsg(sock, &msg, len);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
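/*
 * Illustrative only, not part of socket.c: a minimal user-space caller of
 * sendto(2).  'fd' is assumed to be a UDP socket and 'dest' a filled-in
 * struct sockaddr_in.  The kernel path above copies 'dest' into a
 * MAX_SOCK_ADDR-sized kernel buffer via move_addr_to_kernel() before
 * calling sock_sendmsg().
 *
 *	char payload[] = "ping";
 *	ssize_t n = sendto(fd, payload, sizeof(payload), 0,
 *			   (struct sockaddr *)&dest, sizeof(dest));
 *	if (n < 0)
 *		perror("sendto");
 */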
/*
* Send a datagram down a socket.
*/
asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
{
return sys_sendto(fd, buff, len, flags, NULL, 0);
}
/*
* Receive a frame from the socket and optionally record the address of the
* sender. We verify the buffers are writable and if needed move the
* sender address from kernel to user space.
*/
asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
unsigned flags, struct sockaddr __user *addr,
int __user *addr_len)
{
struct socket *sock;
struct iovec iov;
struct msghdr msg;
char address[MAX_SOCK_ADDR];
int err, err2;
int fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_iovlen = 1;
msg.msg_iov = &iov;
iov.iov_len = size;
iov.iov_base = ubuf;
msg.msg_name = address;
msg.msg_namelen = MAX_SOCK_ADDR;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &msg, size, flags);
if (err >= 0 && addr != NULL) {
err2 = move_addr_to_user(address, msg.msg_namelen, addr, addr_len);
if (err2 < 0)
err = err2;
}
fput_light(sock->file, fput_needed);
out:
return err;
}
/*
* Receive a datagram from a socket.
*/
asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
unsigned flags)
{
return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}
/*
* Set a socket option. Because we don't know the option lengths we have
* to pass the user mode parameter for the protocols to sort out.
*/
asmlinkage long sys_setsockopt(int fd, int level, int optname,
char __user *optval, int optlen)
{
int err, fput_needed;
struct socket *sock;
if (optlen < 0)
return -EINVAL;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
err = security_socket_setsockopt(sock, level, optname);
if (err)
goto out_put;
if (level == SOL_SOCKET)
err =
sock_setsockopt(sock, level, optname, optval,
optlen);
else
err =
sock->ops->setsockopt(sock, level, optname, optval,
optlen);
out_put:
fput_light(sock->file, fput_needed);
}
return err;
}
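/*
 * Illustrative only: a user-space setsockopt(2) call at SOL_SOCKET level,
 * which the code above routes to sock_setsockopt() instead of the
 * protocol's own ->setsockopt handler.  'fd' is an assumed open socket.
 *
 *	int one = 1;
 *	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
 *		perror("setsockopt");
 */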
/*
* Get a socket option. Because we don't know the option lengths we have
* to pass a user mode parameter for the protocols to sort out.
*/
asmlinkage long sys_getsockopt(int fd, int level, int optname,
char __user *optval, int __user *optlen)
{
int err, fput_needed;
struct socket *sock;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
err = security_socket_getsockopt(sock, level, optname);
if (err)
goto out_put;
if (level == SOL_SOCKET)
err =
sock_getsockopt(sock, level, optname, optval,
optlen);
else
err =
sock->ops->getsockopt(sock, level, optname, optval,
optlen);
out_put:
fput_light(sock->file, fput_needed);
}
return err;
}
/*
* Shutdown a socket.
*/
asmlinkage long sys_shutdown(int fd, int how)
{
int err, fput_needed;
struct socket *sock;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
err = security_socket_shutdown(sock, how);
if (!err)
err = sock->ops->shutdown(sock, how);
fput_light(sock->file, fput_needed);
}
return err;
}
/* A couple of helpful macros for getting the address of the 32/64 bit
* fields which are the same type (int / unsigned) on our platforms.
*/
#define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
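/*
 * Illustrative expansion, not additional kernel code: with 'msg' being the
 * struct msghdr __user pointer and 'msg_compat' the same pointer cast to
 * struct compat_msghdr __user * (as in sys_sendmsg/sys_recvmsg below),
 * COMPAT_NAMELEN(msg) expands to
 *
 *	((MSG_CMSG_COMPAT & flags) ? &msg_compat->msg_namelen
 *				   : &msg->msg_namelen)
 *
 * i.e. the address of either the compat or the native msg_namelen field,
 * which share the same int/unsigned layout on the supported platforms.
 */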
/*
* BSD sendmsg interface
*/
asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
struct socket *sock;
char address[MAX_SOCK_ADDR];
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
unsigned char ctl[sizeof(struct cmsghdr) + 20]
__attribute__ ((aligned(sizeof(__kernel_size_t))));
/* 20 is size of ipv6_pktinfo */
unsigned char *ctl_buf = ctl;
struct msghdr msg_sys;
int err, ctl_len, iov_size, total_len;
int fput_needed;
err = -EFAULT;
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(&msg_sys, msg_compat))
return -EFAULT;
}
else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
/* do not move before msg_sys is valid */
err = -EMSGSIZE;
if (msg_sys.msg_iovlen > UIO_MAXIOV)
goto out_put;
/* Check whether to allocate the iovec area */
err = -ENOMEM;
iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
if (msg_sys.msg_iovlen > UIO_FASTIOV) {
iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
if (!iov)
goto out_put;
}
/* This will also move the address data into kernel space */
if (MSG_CMSG_COMPAT & flags) {
err = verify_compat_iovec(&msg_sys, iov, address, VERIFY_READ);
} else
err = verify_iovec(&msg_sys, iov, address, VERIFY_READ);
if (err < 0)
goto out_freeiov;
total_len = err;
err = -ENOBUFS;
if (msg_sys.msg_controllen > INT_MAX)
goto out_freeiov;
ctl_len = msg_sys.msg_controllen;
if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
err =
cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl,
sizeof(ctl));
if (err)
goto out_freeiov;
ctl_buf = msg_sys.msg_control;
ctl_len = msg_sys.msg_controllen;
} else if (ctl_len) {
if (ctl_len > sizeof(ctl)) {
ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
if (ctl_buf == NULL)
goto out_freeiov;
}
err = -EFAULT;
/*
* Careful! Before this, msg_sys.msg_control contains a user pointer.
* Afterwards, it will be a kernel pointer. Thus the compiler-assisted
* checking falls down on this.
*/
if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
ctl_len))
goto out_freectl;
msg_sys.msg_control = ctl_buf;
}
msg_sys.msg_flags = flags;
if (sock->file->f_flags & O_NONBLOCK)
msg_sys.msg_flags |= MSG_DONTWAIT;
err = sock_sendmsg(sock, &msg_sys, total_len);
out_freectl:
if (ctl_buf != ctl)
sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
if (iov != iovstack)
sock_kfree_s(sock->sk, iov, iov_size);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
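/*
 * Illustrative only: a minimal user-space sendmsg(2) call with a two-element
 * iovec ('fd', 'hdr', 'body' and the lengths are hypothetical).  If
 * msg_iovlen exceeded UIO_FASTIOV, the kernel path above would allocate the
 * iovec copy with sock_kmalloc() instead of using the on-stack iovstack[].
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct msghdr m = { .msg_iov = iov, .msg_iovlen = 2 };
 *	ssize_t n = sendmsg(fd, &m, 0);
 */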
/*
* BSD recvmsg interface
*/
asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg,
unsigned int flags)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
struct socket *sock;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct msghdr msg_sys;
unsigned long cmsg_ptr;
int err, iov_size, total_len, len;
int fput_needed;
/* kernel mode address */
char addr[MAX_SOCK_ADDR];
/* user mode address pointers */
struct sockaddr __user *uaddr;
int __user *uaddr_len;
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(&msg_sys, msg_compat))
return -EFAULT;
}
else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = -EMSGSIZE;
if (msg_sys.msg_iovlen > UIO_MAXIOV)
goto out_put;
/* Check whether to allocate the iovec area */
err = -ENOMEM;
iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
if (msg_sys.msg_iovlen > UIO_FASTIOV) {
iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
if (!iov)
goto out_put;
}
/*
* Save the user-mode address (verify_iovec will change the
* kernel msghdr to use the kernel address space)
*/
uaddr = (__force void __user *)msg_sys.msg_name;
uaddr_len = COMPAT_NAMELEN(msg);
if (MSG_CMSG_COMPAT & flags) {
err = verify_compat_iovec(&msg_sys, iov, addr, VERIFY_WRITE);
} else
err = verify_iovec(&msg_sys, iov, addr, VERIFY_WRITE);
if (err < 0)
goto out_freeiov;
total_len = err;
cmsg_ptr = (unsigned long)msg_sys.msg_control;
msg_sys.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &msg_sys, total_len, flags);
if (err < 0)
goto out_freeiov;
len = err;
if (uaddr != NULL) {
err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr,
uaddr_len);
if (err < 0)
goto out_freeiov;
}
err = __put_user((msg_sys.msg_flags & ~MSG_CMSG_COMPAT),
COMPAT_FLAGS(msg));
if (err)
goto out_freeiov;
if (MSG_CMSG_COMPAT & flags)
err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
&msg_compat->msg_controllen);
else
err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
&msg->msg_controllen);
if (err)
goto out_freeiov;
err = len;
out_freeiov:
if (iov != iovstack)
sock_kfree_s(sock->sk, iov, iov_size);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
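/*
 * Illustrative only: receiving a file descriptor over an AF_UNIX socket
 * with recvmsg(2) and MSG_CMSG_CLOEXEC, which the msg_flags handling above
 * passes through so the new descriptor is installed with close-on-exec set.
 * 'sock' and 'received_fd' are hypothetical user-space variables.
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} u;
 *	char byte;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr m = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	int received_fd = -1;
 *	if (recvmsg(sock, &m, MSG_CMSG_CLOEXEC) >= 0) {
 *		struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *		if (c && c->cmsg_level == SOL_SOCKET &&
 *		    c->cmsg_type == SCM_RIGHTS)
 *			memcpy(&received_fd, CMSG_DATA(c), sizeof(int));
 *	}
 */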
#ifdef __ARCH_WANT_SYS_SOCKETCALL
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(unsigned long))
static const unsigned char nargs[18]={
AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)
};
#undef AL
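/*
 * Worked example (for illustration): SYS_BIND is 2, so nargs[SYS_BIND] is
 * AL(3) = 3 * sizeof(unsigned long) -- 24 bytes on a 64-bit kernel.  For a
 * bind(2) issued through socketcall(), sys_socketcall() below therefore
 * copies exactly three longs (fd, userspace sockaddr pointer, address
 * length) from 'args' before dispatching on 'call'.
 */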
/*
* System call vectors.
*
* Argument checking cleaned up. Saved 20% in size.
* This function doesn't need to set the kernel lock because
* it is set by the callees.
*/
asmlinkage long sys_socketcall(int call, unsigned long __user *args)
{
unsigned long a[6];
unsigned long a0, a1;
int err;
if (call < 1 || call > SYS_RECVMSG)
return -EINVAL;
/* copy_from_user should be SMP safe. */
if (copy_from_user(a, args, nargs[call]))
return -EFAULT;
err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
if (err)
return err;
a0 = a[0];
a1 = a[1];
switch (call) {
case SYS_SOCKET:
err = sys_socket(a0, a1, a[2]);
break;
case SYS_BIND:
err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
break;
case SYS_CONNECT:
err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
break;
case SYS_LISTEN:
err = sys_listen(a0, a1);
break;
case SYS_ACCEPT:
err =
sys_accept(a0, (struct sockaddr __user *)a1,
(int __user *)a[2]);
break;
case SYS_GETSOCKNAME:
err =
sys_getsockname(a0, (struct sockaddr __user *)a1,
(int __user *)a[2]);
break;
case SYS_GETPEERNAME:
err =
sys_getpeername(a0, (struct sockaddr __user *)a1,
(int __user *)a[2]);
break;
case SYS_SOCKETPAIR:
err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
break;
case SYS_SEND:
err = sys_send(a0, (void __user *)a1, a[2], a[3]);
break;
case SYS_SENDTO:
err = sys_sendto(a0, (void __user *)a1, a[2], a[3],
(struct sockaddr __user *)a[4], a[5]);
break;
case SYS_RECV:
err = sys_recv(a0, (void __user *)a1, a[2], a[3]);
break;
case SYS_RECVFROM:
err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
(struct sockaddr __user *)a[4],
(int __user *)a[5]);
break;
case SYS_SHUTDOWN:
err = sys_shutdown(a0, a1);
break;
case SYS_SETSOCKOPT:
err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]);
break;
case SYS_GETSOCKOPT:
err =
sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
(int __user *)a[4]);
break;
case SYS_SENDMSG:
err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
break;
case SYS_RECVMSG:
err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
break;
default:
err = -EINVAL;
break;
}
return err;
}
#endif /* __ARCH_WANT_SYS_SOCKETCALL */
/**
* sock_register - add a socket protocol handler
* @ops: description of protocol
*
* This function is called by a protocol handler that wants to
* advertise its address family, and have it linked into the
* socket interface. The value ops->family corresponds to the
* socket system call protocol family.
*/
int sock_register(const struct net_proto_family *ops)
{
int err;
if (ops->family >= NPROTO) {
printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
NPROTO);
return -ENOBUFS;
}
spin_lock(&net_family_lock);
if (net_families[ops->family])
err = -EEXIST;
else {
net_families[ops->family] = ops;
err = 0;
}
spin_unlock(&net_family_lock);
printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
return err;
}
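/*
 * Illustrative only (hypothetical protocol family): a module typically
 * fills in a struct net_proto_family and registers it from its init path,
 * then removes it again with sock_unregister() on exit.  The .create
 * callback signature shown is assumed to match this kernel's
 * struct net_proto_family; the family number below is a placeholder.
 *
 *	static int example_create(struct net *net, struct socket *sock,
 *				  int protocol);
 *
 *	static const struct net_proto_family example_family_ops = {
 *		.family	= AF_MAX - 1,
 *		.create	= example_create,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return sock_register(&example_family_ops);
 *	}
 */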
/**
* sock_unregister - remove a protocol handler
* @family: protocol family to remove
*
* This function is called by a protocol handler that wants to
* remove its address family, and have it unlinked from the
* new socket creation.
*
* If protocol handler is a module, then it can use module reference
* counts to protect against new references. If protocol handler is not
* a module then it needs to provide its own protection in
* the ops->create routine.
*/
void sock_unregister(int family)
{
BUG_ON(family < 0 || family >= NPROTO);
spin_lock(&net_family_lock);
net_families[family] = NULL;
spin_unlock(&net_family_lock);
synchronize_rcu();
printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
}
static int __init sock_init(void)
{
/*
* Initialize sock SLAB cache.
*/
sk_init();
/*
* Initialize skbuff SLAB cache
*/
skb_init();
/*
* Initialize the protocols module.
*/
init_inodecache();
register_filesystem(&sock_fs_type);
sock_mnt = kern_mount(&sock_fs_type);
/* The real protocol initialization is performed in later initcalls.
*/
#ifdef CONFIG_NETFILTER
netfilter_init();
#endif
return 0;
}
core_initcall(sock_init); /* early initcall */
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
int cpu;
int counter = 0;
for_each_possible_cpu(cpu)
counter += per_cpu(sockets_in_use, cpu);
/* It can be negative, by the way. 8) */
if (counter < 0)
counter = 0;
seq_printf(seq, "sockets: used %d\n", counter);
}
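/*
 * Illustrative only: the per-cpu counter summed above surfaces to user
 * space as the first line of /proc/net/sockstat, e.g.
 *
 *	sockets: used 123
 */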
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
struct socket *sock = file->private_data;
int ret = -ENOIOCTLCMD;
struct sock *sk;
struct net *net;
sk = sock->sk;
net = sock_net(sk);
if (sock->ops->compat_ioctl)
ret = sock->ops->compat_ioctl(sock, cmd, arg);
if (ret == -ENOIOCTLCMD &&
(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
ret = compat_wext_handle_ioctl(net, cmd, arg);
return ret;
}
#endif
int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
return sock->ops->bind(sock, addr, addrlen);
}
int kernel_listen(struct socket *sock, int backlog)
{
return sock->ops->listen(sock, backlog);
}
int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
{
struct sock *sk = sock->sk;
int err;
err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
newsock);
if (err < 0)
goto done;
err = sock->ops->accept(sock, *newsock, flags);
if (err < 0) {
sock_release(*newsock);
*newsock = NULL;
goto done;
}
(*newsock)->ops = sock->ops;
done:
return err;
}
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags)
{
return sock->ops->connect(sock, addr, addrlen, flags);
}
int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
int *addrlen)
{
return sock->ops->getname(sock, addr, addrlen, 0);
}
int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
int *addrlen)
{
return sock->ops->getname(sock, addr, addrlen, 1);
}
int kernel_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen)
{
mm_segment_t oldfs = get_fs();
int err;
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
err = sock_getsockopt(sock, level, optname, optval, optlen);
else
err = sock->ops->getsockopt(sock, level, optname, optval,
optlen);
set_fs(oldfs);
return err;
}
int kernel_setsockopt(struct socket *sock, int level, int optname,
char *optval, int optlen)
{
mm_segment_t oldfs = get_fs();
int err;
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
err = sock_setsockopt(sock, level, optname, optval, optlen);
else
err = sock->ops->setsockopt(sock, level, optname, optval,
optlen);
set_fs(oldfs);
return err;
}
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
if (sock->ops->sendpage)
return sock->ops->sendpage(sock, page, offset, size, flags);
return sock_no_sendpage(sock, page, offset, size, flags);
}
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
mm_segment_t oldfs = get_fs();
int err;
set_fs(KERNEL_DS);
err = sock->ops->ioctl(sock, cmd, arg);
set_fs(oldfs);
return err;
}
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(sock_create);
EXPORT_SYMBOL(sock_create_kern);
EXPORT_SYMBOL(sock_create_lite);
EXPORT_SYMBOL(sock_map_fd);
EXPORT_SYMBOL(sock_recvmsg);
EXPORT_SYMBOL(sock_register);
EXPORT_SYMBOL(sock_release);
EXPORT_SYMBOL(sock_sendmsg);
EXPORT_SYMBOL(sock_unregister);
EXPORT_SYMBOL(sock_wake_async);
EXPORT_SYMBOL(sockfd_lookup);
EXPORT_SYMBOL(kernel_sendmsg);
EXPORT_SYMBOL(kernel_recvmsg);
EXPORT_SYMBOL(kernel_bind);
EXPORT_SYMBOL(kernel_listen);
EXPORT_SYMBOL(kernel_accept);
EXPORT_SYMBOL(kernel_connect);
EXPORT_SYMBOL(kernel_getsockname);
EXPORT_SYMBOL(kernel_getpeername);
EXPORT_SYMBOL(kernel_getsockopt);
EXPORT_SYMBOL(kernel_setsockopt);
EXPORT_SYMBOL(kernel_sendpage);
EXPORT_SYMBOL(kernel_sock_ioctl);
EXPORT_SYMBOL(kernel_sock_shutdown);