/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
# include <linux/types.h>
# include <linux/mm.h>
2006-01-11 23:17:47 +03:00
# include <linux/capability.h>
2005-04-17 02:20:36 +04:00
# include <linux/fcntl.h>
# include <linux/socket.h>
# include <linux/in.h>
# include <linux/inet.h>
# include <linux/netdevice.h>
# include <linux/if_packet.h>
# include <linux/wireless.h>
2007-02-05 10:33:10 +03:00
# include <linux/kernel.h>
2005-04-17 02:20:36 +04:00
# include <linux/kmod.h>
2007-09-12 14:01:34 +04:00
# include <net/net_namespace.h>
2005-04-17 02:20:36 +04:00
# include <net/ip.h>
# include <net/protocol.h>
# include <linux/skbuff.h>
# include <net/sock.h>
# include <linux/errno.h>
# include <linux/timer.h>
# include <asm/system.h>
# include <asm/uaccess.h>
# include <asm/ioctls.h>
# include <asm/page.h>
2006-10-20 00:08:53 +04:00
# include <asm/cacheflush.h>
2005-04-17 02:20:36 +04:00
# include <asm/io.h>
# include <linux/proc_fs.h>
# include <linux/seq_file.h>
# include <linux/poll.h>
# include <linux/module.h>
# include <linux/init.h>
2009-01-31 01:12:06 +03:00
# include <linux/mutex.h>
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_INET
# include <net/inet_common.h>
# endif
/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are enough clever to reallocate skb, when header
     will not fit to reserved space (tunnel), another ones are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely, that it points to ll
		 header. PPP makes it, that is wrong, because introduce
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
  If dev->hard_header == NULL we are unlikely to restore sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to correct position,
   packet classifier depends on it.
 */
/* Private packet socket structures. */
2009-07-22 01:57:59 +04:00
struct packet_mclist {
2005-04-17 02:20:36 +04:00
struct packet_mclist * next ;
int ifindex ;
int count ;
unsigned short type ;
unsigned short alen ;
2005-09-21 11:11:37 +04:00
unsigned char addr [ MAX_ADDR_LEN ] ;
} ;
/* identical to struct packet_mreq except it has
* a longer address field .
*/
2009-07-22 01:57:59 +04:00
struct packet_mreq_max {
2005-09-21 11:11:37 +04:00
int mr_ifindex ;
unsigned short mr_type ;
unsigned short mr_alen ;
unsigned char mr_address [ MAX_ADDR_LEN ] ;
2005-04-17 02:20:36 +04:00
} ;
2007-05-30 00:12:50 +04:00
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_PACKET_MMAP
2009-05-19 09:11:22 +04:00
static int packet_set_ring ( struct sock * sk , struct tpacket_req * req ,
int closing , int tx_ring ) ;
struct packet_ring_buffer {
2009-07-22 01:57:59 +04:00
char * * pg_vec ;
2009-05-19 09:11:22 +04:00
unsigned int head ;
unsigned int frames_per_block ;
unsigned int frame_size ;
unsigned int frame_max ;
unsigned int pg_vec_order ;
unsigned int pg_vec_pages ;
unsigned int pg_vec_len ;
atomic_t pending ;
} ;
struct packet_sock ;
static int tpacket_snd ( struct packet_sock * po , struct msghdr * msg ) ;
2005-04-17 02:20:36 +04:00
# endif
static void packet_flush_mclist ( struct sock * sk ) ;
struct packet_sock {
/* struct sock has to be the first member of packet_sock */
struct sock sk ;
struct tpacket_stats stats ;
# ifdef CONFIG_PACKET_MMAP
2009-05-19 09:11:22 +04:00
struct packet_ring_buffer rx_ring ;
struct packet_ring_buffer tx_ring ;
2005-04-17 02:20:36 +04:00
int copy_thresh ;
# endif
struct packet_type prot_hook ;
spinlock_t bind_lock ;
2009-01-31 01:12:06 +03:00
struct mutex pg_vec_lock ;
2007-02-05 10:31:32 +03:00
unsigned int running : 1 , /* prot_hook is attached*/
2007-04-21 03:05:39 +04:00
auxdata : 1 ,
origdev : 1 ;
2005-04-17 02:20:36 +04:00
int ifindex ; /* bound device */
2006-11-08 11:26:29 +03:00
__be16 num ;
2005-04-17 02:20:36 +04:00
struct packet_mclist * mclist ;
# ifdef CONFIG_PACKET_MMAP
atomic_t mapped ;
2008-07-15 09:50:15 +04:00
enum tpacket_versions tp_version ;
unsigned int tp_hdrlen ;
2008-07-19 05:05:19 +04:00
unsigned int tp_reserve ;
2009-05-19 09:11:22 +04:00
unsigned int tp_loss : 1 ;
2005-04-17 02:20:36 +04:00
# endif
} ;
2007-02-05 10:33:10 +03:00
/* Control-block layout stashed in skb->cb while an skb sits on a packet
 * socket's receive queue: the original length plus the address that
 * recvmsg() will hand back to userspace.
 */
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
2007-02-05 10:31:32 +03:00
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_PACKET_MMAP
2009-05-19 09:11:22 +04:00
static void __packet_set_status ( struct packet_sock * po , void * frame , int status )
2005-04-17 02:20:36 +04:00
{
2008-07-15 09:50:15 +04:00
union {
struct tpacket_hdr * h1 ;
struct tpacket2_hdr * h2 ;
void * raw ;
} h ;
2005-04-17 02:20:36 +04:00
2009-05-19 09:11:22 +04:00
h . raw = frame ;
2008-07-15 09:50:15 +04:00
switch ( po - > tp_version ) {
case TPACKET_V1 :
2009-05-19 09:11:22 +04:00
h . h1 - > tp_status = status ;
flush_dcache_page ( virt_to_page ( & h . h1 - > tp_status ) ) ;
2008-07-15 09:50:15 +04:00
break ;
case TPACKET_V2 :
2009-05-19 09:11:22 +04:00
h . h2 - > tp_status = status ;
flush_dcache_page ( virt_to_page ( & h . h2 - > tp_status ) ) ;
2008-07-15 09:50:15 +04:00
break ;
2009-05-19 09:11:22 +04:00
default :
2009-07-22 01:57:59 +04:00
pr_err ( " TPACKET version not supported \n " ) ;
2009-05-19 09:11:22 +04:00
BUG ( ) ;
2008-07-15 09:50:15 +04:00
}
2009-05-19 09:11:22 +04:00
smp_wmb ( ) ;
2008-07-15 09:50:15 +04:00
}
2009-05-19 09:11:22 +04:00
static int __packet_get_status ( struct packet_sock * po , void * frame )
2008-07-15 09:50:15 +04:00
{
union {
struct tpacket_hdr * h1 ;
struct tpacket2_hdr * h2 ;
void * raw ;
} h ;
2009-05-19 09:11:22 +04:00
smp_rmb ( ) ;
2008-07-15 09:50:15 +04:00
h . raw = frame ;
switch ( po - > tp_version ) {
case TPACKET_V1 :
2009-05-19 09:11:22 +04:00
flush_dcache_page ( virt_to_page ( & h . h1 - > tp_status ) ) ;
return h . h1 - > tp_status ;
2008-07-15 09:50:15 +04:00
case TPACKET_V2 :
2009-05-19 09:11:22 +04:00
flush_dcache_page ( virt_to_page ( & h . h2 - > tp_status ) ) ;
return h . h2 - > tp_status ;
default :
2009-07-22 01:57:59 +04:00
pr_err ( " TPACKET version not supported \n " ) ;
2009-05-19 09:11:22 +04:00
BUG ( ) ;
return 0 ;
2008-07-15 09:50:15 +04:00
}
2005-04-17 02:20:36 +04:00
}
2009-05-19 09:11:22 +04:00
static void * packet_lookup_frame ( struct packet_sock * po ,
struct packet_ring_buffer * rb ,
unsigned int position ,
int status )
{
unsigned int pg_vec_pos , frame_offset ;
union {
struct tpacket_hdr * h1 ;
struct tpacket2_hdr * h2 ;
void * raw ;
} h ;
pg_vec_pos = position / rb - > frames_per_block ;
frame_offset = position % rb - > frames_per_block ;
h . raw = rb - > pg_vec [ pg_vec_pos ] + ( frame_offset * rb - > frame_size ) ;
if ( status ! = __packet_get_status ( po , h . raw ) )
return NULL ;
return h . raw ;
}
static inline void * packet_current_frame ( struct packet_sock * po ,
struct packet_ring_buffer * rb ,
int status )
{
return packet_lookup_frame ( po , rb , rb - > head , status ) ;
}
static inline void * packet_previous_frame ( struct packet_sock * po ,
struct packet_ring_buffer * rb ,
int status )
{
unsigned int previous = rb - > head ? rb - > head - 1 : rb - > frame_max ;
return packet_lookup_frame ( po , rb , previous , status ) ;
}
static inline void packet_increment_head ( struct packet_ring_buffer * buff )
{
buff - > head = buff - > head ! = buff - > frame_max ? buff - > head + 1 : 0 ;
}
2005-04-17 02:20:36 +04:00
# endif
/* Downcast a generic sock to its packet_sock container; valid because
 * struct sock is the first member of struct packet_sock.
 */
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct ( struct sock * sk )
{
2008-07-26 08:43:18 +04:00
WARN_ON ( atomic_read ( & sk - > sk_rmem_alloc ) ) ;
WARN_ON ( atomic_read ( & sk - > sk_wmem_alloc ) ) ;
2005-04-17 02:20:36 +04:00
if ( ! sock_flag ( sk , SOCK_DEAD ) ) {
2009-07-22 01:57:59 +04:00
pr_err ( " Attempt to release alive packet socket: %p \n " , sk ) ;
2005-04-17 02:20:36 +04:00
return ;
}
2007-11-11 08:38:48 +03:00
sk_refcnt_debug_dec ( sk ) ;
2005-04-17 02:20:36 +04:00
}
2005-12-22 23:49:22 +03:00
static const struct proto_ops packet_ops ;
2005-04-17 02:20:36 +04:00
2005-12-22 23:49:22 +03:00
static const struct proto_ops packet_ops_spkt ;
2005-04-17 02:20:36 +04:00
2009-07-22 01:57:59 +04:00
static int packet_rcv_spkt ( struct sk_buff * skb , struct net_device * dev ,
struct packet_type * pt , struct net_device * orig_dev )
2005-04-17 02:20:36 +04:00
{
struct sock * sk ;
struct sockaddr_pkt * spkt ;
/*
* When we registered the protocol we saved the socket in the data
* field for just this event .
*/
sk = pt - > af_packet_priv ;
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
/*
* Yank back the headers [ hope the device set this
* right or kerboom . . . ]
*
* Incoming packets have ll header pulled ,
* push it back .
*
2007-03-20 01:33:04 +03:00
* For outgoing ones skb - > data = = skb_mac_header ( skb )
2005-04-17 02:20:36 +04:00
* so that this procedure is noop .
*/
if ( skb - > pkt_type = = PACKET_LOOPBACK )
goto out ;
2008-03-25 20:26:21 +03:00
if ( dev_net ( dev ) ! = sock_net ( sk ) )
2007-11-20 09:28:35 +03:00
goto out ;
2009-07-22 01:57:59 +04:00
skb = skb_share_check ( skb , GFP_ATOMIC ) ;
if ( skb = = NULL )
2005-04-17 02:20:36 +04:00
goto oom ;
/* drop any routing info */
2009-06-02 09:19:30 +04:00
skb_dst_drop ( skb ) ;
2005-04-17 02:20:36 +04:00
2005-07-12 22:57:52 +04:00
/* drop conntrack reference */
nf_reset ( skb ) ;
2007-02-05 10:33:10 +03:00
spkt = & PACKET_SKB_CB ( skb ) - > sa . pkt ;
2005-04-17 02:20:36 +04:00
2007-03-20 01:33:04 +03:00
skb_push ( skb , skb - > data - skb_mac_header ( skb ) ) ;
2005-04-17 02:20:36 +04:00
/*
* The SOCK_PACKET socket receives _all_ frames .
*/
spkt - > spkt_family = dev - > type ;
strlcpy ( spkt - > spkt_device , dev - > name , sizeof ( spkt - > spkt_device ) ) ;
spkt - > spkt_protocol = skb - > protocol ;
/*
* Charge the memory to the socket . This is done specifically
* to prevent sockets using all the memory up .
*/
2009-07-22 01:57:59 +04:00
if ( sock_queue_rcv_skb ( sk , skb ) = = 0 )
2005-04-17 02:20:36 +04:00
return 0 ;
out :
kfree_skb ( skb ) ;
oom :
return 0 ;
}
/*
* Output a raw packet to a device layer . This bypasses all the other
* protocol layers and you must therefore supply it with a complete frame
*/
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete
 *	frame.  Requires an explicit sockaddr_pkt naming the device; the
 *	protocol is taken from the address when one is supplied.
 *	Returns the byte count sent or a negative errno.
 */
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */
	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */
	saddr->spkt_device[13] = 0;	/* force NUL termination of the name */
	dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */
	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user
	 *	gets to deal with the problem - do your own algorithmic backoffs.
	 *	That's far more flexible.
	 */
	if (skb == NULL)
		goto out_unlock;

	/*
	 *	Fill it in
	 */

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	/* Try to align data part correctly */
	if (dev->header_ops) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb_reset_network_header(skb);
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	if (err)
		goto out_free;

	/*
	 *	Now send it
	 */
	dev_queue_xmit(skb);
	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
2007-01-25 02:21:02 +03:00
static inline unsigned int run_filter ( struct sk_buff * skb , struct sock * sk ,
unsigned int res )
2005-04-17 02:20:36 +04:00
{
struct sk_filter * filter ;
2006-09-01 02:28:39 +04:00
rcu_read_lock_bh ( ) ;
filter = rcu_dereference ( sk - > sk_filter ) ;
2007-01-25 02:21:02 +03:00
if ( filter ! = NULL )
res = sk_run_filter ( skb , filter - > insns , filter - > len ) ;
2006-09-01 02:28:39 +04:00
rcu_read_unlock_bh ( ) ;
2005-04-17 02:20:36 +04:00
2007-01-25 02:21:02 +03:00
return res ;
2005-04-17 02:20:36 +04:00
}
/*
This function makes lazy skb cloning in hope that most of packets
are discarded by BPF .
Note tricky part : we DO mangle shared skb ! skb - > data , skb - > len
and skb - > cb are mangled . It works because ( and until ) packets
falling here are owned by current CPU . Output packets are cloned
by dev_queue_xmit_nit ( ) , input packets are processed by net_bh
sequencially , so that if we return skb to original state on exit ,
we will not harm anyone .
*/
2009-07-22 01:57:59 +04:00
static int packet_rcv ( struct sk_buff * skb , struct net_device * dev ,
struct packet_type * pt , struct net_device * orig_dev )
2005-04-17 02:20:36 +04:00
{
struct sock * sk ;
struct sockaddr_ll * sll ;
struct packet_sock * po ;
2009-07-22 01:57:59 +04:00
u8 * skb_head = skb - > data ;
2005-04-17 02:20:36 +04:00
int skb_len = skb - > len ;
2007-01-25 02:21:02 +03:00
unsigned int snaplen , res ;
2005-04-17 02:20:36 +04:00
if ( skb - > pkt_type = = PACKET_LOOPBACK )
goto drop ;
sk = pt - > af_packet_priv ;
po = pkt_sk ( sk ) ;
2008-03-25 20:26:21 +03:00
if ( dev_net ( dev ) ! = sock_net ( sk ) )
2007-11-20 09:28:35 +03:00
goto drop ;
2005-04-17 02:20:36 +04:00
skb - > dev = dev ;
2007-10-09 12:40:57 +04:00
if ( dev - > header_ops ) {
2005-04-17 02:20:36 +04:00
/* The device has an explicit notion of ll header,
exported to higher levels .
Otherwise , the device hides datails of it frame
structure , so that corresponding packet head
never delivered to user .
*/
if ( sk - > sk_type ! = SOCK_DGRAM )
2007-03-20 01:33:04 +03:00
skb_push ( skb , skb - > data - skb_mac_header ( skb ) ) ;
2005-04-17 02:20:36 +04:00
else if ( skb - > pkt_type = = PACKET_OUTGOING ) {
/* Special case: outgoing packets have ll header at head */
2007-03-11 04:16:10 +03:00
skb_pull ( skb , skb_network_offset ( skb ) ) ;
2005-04-17 02:20:36 +04:00
}
}
snaplen = skb - > len ;
2007-01-25 02:21:02 +03:00
res = run_filter ( skb , sk , snaplen ) ;
if ( ! res )
2006-09-01 02:28:39 +04:00
goto drop_n_restore ;
2007-01-25 02:21:02 +03:00
if ( snaplen > res )
snaplen = res ;
2005-04-17 02:20:36 +04:00
if ( atomic_read ( & sk - > sk_rmem_alloc ) + skb - > truesize > =
( unsigned ) sk - > sk_rcvbuf )
goto drop_n_acct ;
if ( skb_shared ( skb ) ) {
struct sk_buff * nskb = skb_clone ( skb , GFP_ATOMIC ) ;
if ( nskb = = NULL )
goto drop_n_acct ;
if ( skb_head ! = skb - > data ) {
skb - > data = skb_head ;
skb - > len = skb_len ;
}
kfree_skb ( skb ) ;
skb = nskb ;
}
2007-02-05 10:33:10 +03:00
BUILD_BUG_ON ( sizeof ( * PACKET_SKB_CB ( skb ) ) + MAX_ADDR_LEN - 8 >
sizeof ( skb - > cb ) ) ;
sll = & PACKET_SKB_CB ( skb ) - > sa . ll ;
2005-04-17 02:20:36 +04:00
sll - > sll_family = AF_PACKET ;
sll - > sll_hatype = dev - > type ;
sll - > sll_protocol = skb - > protocol ;
sll - > sll_pkttype = skb - > pkt_type ;
2007-11-11 09:03:25 +03:00
if ( unlikely ( po - > origdev ) )
2007-04-21 03:05:39 +04:00
sll - > sll_ifindex = orig_dev - > ifindex ;
else
sll - > sll_ifindex = dev - > ifindex ;
2005-04-17 02:20:36 +04:00
2007-09-27 09:13:38 +04:00
sll - > sll_halen = dev_parse_header ( skb , sll - > sll_addr ) ;
2005-04-17 02:20:36 +04:00
2007-02-05 10:33:10 +03:00
PACKET_SKB_CB ( skb ) - > origlen = skb - > len ;
2007-02-05 10:31:32 +03:00
2005-04-17 02:20:36 +04:00
if ( pskb_trim ( skb , snaplen ) )
goto drop_n_acct ;
skb_set_owner_r ( skb , sk ) ;
skb - > dev = NULL ;
2009-06-02 09:19:30 +04:00
skb_dst_drop ( skb ) ;
2005-04-17 02:20:36 +04:00
2005-07-12 22:57:52 +04:00
/* drop conntrack reference */
nf_reset ( skb ) ;
2005-04-17 02:20:36 +04:00
spin_lock ( & sk - > sk_receive_queue . lock ) ;
po - > stats . tp_packets + + ;
__skb_queue_tail ( & sk - > sk_receive_queue , skb ) ;
spin_unlock ( & sk - > sk_receive_queue . lock ) ;
sk - > sk_data_ready ( sk , skb - > len ) ;
return 0 ;
drop_n_acct :
spin_lock ( & sk - > sk_receive_queue . lock ) ;
po - > stats . tp_drops + + ;
spin_unlock ( & sk - > sk_receive_queue . lock ) ;
drop_n_restore :
if ( skb_head ! = skb - > data & & skb_shared ( skb ) ) {
skb - > data = skb_head ;
skb - > len = skb_len ;
}
drop :
2009-03-11 12:49:55 +03:00
consume_skb ( skb ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
# ifdef CONFIG_PACKET_MMAP
2009-07-22 01:57:59 +04:00
static int tpacket_rcv ( struct sk_buff * skb , struct net_device * dev ,
struct packet_type * pt , struct net_device * orig_dev )
2005-04-17 02:20:36 +04:00
{
struct sock * sk ;
struct packet_sock * po ;
struct sockaddr_ll * sll ;
2008-07-15 09:50:15 +04:00
union {
struct tpacket_hdr * h1 ;
struct tpacket2_hdr * h2 ;
void * raw ;
} h ;
2009-07-22 01:57:59 +04:00
u8 * skb_head = skb - > data ;
2005-04-17 02:20:36 +04:00
int skb_len = skb - > len ;
2007-01-25 02:21:02 +03:00
unsigned int snaplen , res ;
2005-04-17 02:20:36 +04:00
unsigned long status = TP_STATUS_LOSING | TP_STATUS_USER ;
2008-07-15 09:50:15 +04:00
unsigned short macoff , netoff , hdrlen ;
2005-04-17 02:20:36 +04:00
struct sk_buff * copy_skb = NULL ;
2007-04-20 03:16:32 +04:00
struct timeval tv ;
2008-07-15 09:50:15 +04:00
struct timespec ts ;
2005-04-17 02:20:36 +04:00
if ( skb - > pkt_type = = PACKET_LOOPBACK )
goto drop ;
sk = pt - > af_packet_priv ;
po = pkt_sk ( sk ) ;
2008-03-25 20:26:21 +03:00
if ( dev_net ( dev ) ! = sock_net ( sk ) )
2007-11-20 09:28:35 +03:00
goto drop ;
2007-10-09 12:40:57 +04:00
if ( dev - > header_ops ) {
2005-04-17 02:20:36 +04:00
if ( sk - > sk_type ! = SOCK_DGRAM )
2007-03-20 01:33:04 +03:00
skb_push ( skb , skb - > data - skb_mac_header ( skb ) ) ;
2005-04-17 02:20:36 +04:00
else if ( skb - > pkt_type = = PACKET_OUTGOING ) {
/* Special case: outgoing packets have ll header at head */
2007-03-11 04:16:10 +03:00
skb_pull ( skb , skb_network_offset ( skb ) ) ;
2005-04-17 02:20:36 +04:00
}
}
2007-02-05 10:31:32 +03:00
if ( skb - > ip_summed = = CHECKSUM_PARTIAL )
status | = TP_STATUS_CSUMNOTREADY ;
2005-04-17 02:20:36 +04:00
snaplen = skb - > len ;
2007-01-25 02:21:02 +03:00
res = run_filter ( skb , sk , snaplen ) ;
if ( ! res )
2006-09-01 02:28:39 +04:00
goto drop_n_restore ;
2007-01-25 02:21:02 +03:00
if ( snaplen > res )
snaplen = res ;
2005-04-17 02:20:36 +04:00
if ( sk - > sk_type = = SOCK_DGRAM ) {
2008-07-19 05:05:19 +04:00
macoff = netoff = TPACKET_ALIGN ( po - > tp_hdrlen ) + 16 +
po - > tp_reserve ;
2005-04-17 02:20:36 +04:00
} else {
2007-03-11 04:16:10 +03:00
unsigned maclen = skb_network_offset ( skb ) ;
2008-07-15 09:50:15 +04:00
netoff = TPACKET_ALIGN ( po - > tp_hdrlen +
2008-07-19 05:05:19 +04:00
( maclen < 16 ? 16 : maclen ) ) +
po - > tp_reserve ;
2005-04-17 02:20:36 +04:00
macoff = netoff - maclen ;
}
2009-05-19 09:11:22 +04:00
if ( macoff + snaplen > po - > rx_ring . frame_size ) {
2005-04-17 02:20:36 +04:00
if ( po - > copy_thresh & &
atomic_read ( & sk - > sk_rmem_alloc ) + skb - > truesize <
( unsigned ) sk - > sk_rcvbuf ) {
if ( skb_shared ( skb ) ) {
copy_skb = skb_clone ( skb , GFP_ATOMIC ) ;
} else {
copy_skb = skb_get ( skb ) ;
skb_head = skb - > data ;
}
if ( copy_skb )
skb_set_owner_r ( copy_skb , sk ) ;
}
2009-05-19 09:11:22 +04:00
snaplen = po - > rx_ring . frame_size - macoff ;
2005-04-17 02:20:36 +04:00
if ( ( int ) snaplen < 0 )
snaplen = 0 ;
}
spin_lock ( & sk - > sk_receive_queue . lock ) ;
2009-05-19 09:11:22 +04:00
h . raw = packet_current_frame ( po , & po - > rx_ring , TP_STATUS_KERNEL ) ;
2008-07-15 09:50:15 +04:00
if ( ! h . raw )
2005-04-17 02:20:36 +04:00
goto ring_is_full ;
2009-05-19 09:11:22 +04:00
packet_increment_head ( & po - > rx_ring ) ;
2005-04-17 02:20:36 +04:00
po - > stats . tp_packets + + ;
if ( copy_skb ) {
status | = TP_STATUS_COPY ;
__skb_queue_tail ( & sk - > sk_receive_queue , copy_skb ) ;
}
if ( ! po - > stats . tp_drops )
status & = ~ TP_STATUS_LOSING ;
spin_unlock ( & sk - > sk_receive_queue . lock ) ;
2008-07-15 09:50:15 +04:00
skb_copy_bits ( skb , 0 , h . raw + macoff , snaplen ) ;
2005-04-17 02:20:36 +04:00
2008-07-15 09:50:15 +04:00
switch ( po - > tp_version ) {
case TPACKET_V1 :
h . h1 - > tp_len = skb - > len ;
h . h1 - > tp_snaplen = snaplen ;
h . h1 - > tp_mac = macoff ;
h . h1 - > tp_net = netoff ;
if ( skb - > tstamp . tv64 )
tv = ktime_to_timeval ( skb - > tstamp ) ;
else
do_gettimeofday ( & tv ) ;
h . h1 - > tp_sec = tv . tv_sec ;
h . h1 - > tp_usec = tv . tv_usec ;
hdrlen = sizeof ( * h . h1 ) ;
break ;
case TPACKET_V2 :
h . h2 - > tp_len = skb - > len ;
h . h2 - > tp_snaplen = snaplen ;
h . h2 - > tp_mac = macoff ;
h . h2 - > tp_net = netoff ;
if ( skb - > tstamp . tv64 )
ts = ktime_to_timespec ( skb - > tstamp ) ;
else
getnstimeofday ( & ts ) ;
h . h2 - > tp_sec = ts . tv_sec ;
h . h2 - > tp_nsec = ts . tv_nsec ;
2008-07-15 09:50:39 +04:00
h . h2 - > tp_vlan_tci = skb - > vlan_tci ;
2008-07-15 09:50:15 +04:00
hdrlen = sizeof ( * h . h2 ) ;
break ;
default :
BUG ( ) ;
}
2005-04-17 02:20:36 +04:00
2008-07-15 09:50:15 +04:00
sll = h . raw + TPACKET_ALIGN ( hdrlen ) ;
2007-09-27 09:13:38 +04:00
sll - > sll_halen = dev_parse_header ( skb , sll - > sll_addr ) ;
2005-04-17 02:20:36 +04:00
sll - > sll_family = AF_PACKET ;
sll - > sll_hatype = dev - > type ;
sll - > sll_protocol = skb - > protocol ;
sll - > sll_pkttype = skb - > pkt_type ;
2007-11-11 09:03:25 +03:00
if ( unlikely ( po - > origdev ) )
2007-04-21 03:05:39 +04:00
sll - > sll_ifindex = orig_dev - > ifindex ;
else
sll - > sll_ifindex = dev - > ifindex ;
2005-04-17 02:20:36 +04:00
2008-07-15 09:50:15 +04:00
__packet_set_status ( po , h . raw , status ) ;
2006-12-07 11:11:33 +03:00
smp_mb ( ) ;
2005-04-17 02:20:36 +04:00
{
struct page * p_start , * p_end ;
2008-07-15 09:50:15 +04:00
u8 * h_end = h . raw + macoff + snaplen - 1 ;
2005-04-17 02:20:36 +04:00
2008-07-15 09:50:15 +04:00
p_start = virt_to_page ( h . raw ) ;
2005-04-17 02:20:36 +04:00
p_end = virt_to_page ( h_end ) ;
while ( p_start < = p_end ) {
flush_dcache_page ( p_start ) ;
p_start + + ;
}
}
sk - > sk_data_ready ( sk , 0 ) ;
drop_n_restore :
if ( skb_head ! = skb - > data & & skb_shared ( skb ) ) {
skb - > data = skb_head ;
skb - > len = skb_len ;
}
drop :
2007-02-09 17:25:10 +03:00
kfree_skb ( skb ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
ring_is_full :
po - > stats . tp_drops + + ;
spin_unlock ( & sk - > sk_receive_queue . lock ) ;
sk - > sk_data_ready ( sk , 0 ) ;
2009-02-25 03:36:42 +03:00
kfree_skb ( copy_skb ) ;
2005-04-17 02:20:36 +04:00
goto drop_n_restore ;
}
2009-05-19 09:11:22 +04:00
static void tpacket_destruct_skb ( struct sk_buff * skb )
{
struct packet_sock * po = pkt_sk ( skb - > sk ) ;
2009-07-22 01:57:59 +04:00
void * ph ;
2005-04-17 02:20:36 +04:00
2009-05-19 09:11:22 +04:00
BUG_ON ( skb = = NULL ) ;
2005-04-17 02:20:36 +04:00
2009-05-19 09:11:22 +04:00
if ( likely ( po - > tx_ring . pg_vec ) ) {
ph = skb_shinfo ( skb ) - > destructor_arg ;
BUG_ON ( __packet_get_status ( po , ph ) ! = TP_STATUS_SENDING ) ;
BUG_ON ( atomic_read ( & po - > tx_ring . pending ) = = 0 ) ;
atomic_dec ( & po - > tx_ring . pending ) ;
__packet_set_status ( po , ph , TP_STATUS_AVAILABLE ) ;
}
sock_wfree ( skb ) ;
}
2009-07-22 01:57:59 +04:00
/* Build an skb around a TX-ring frame without copying the payload: the
 * link-layer header is stored in the skb head (built via dev_hard_header
 * for SOCK_DGRAM, or taken from the frame for SOCK_RAW) and the rest of
 * the frame is attached page by page as fragments.  Returns the frame's
 * tp_len on success or a negative errno.
 */
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	/* remember the slot so tpacket_destruct_skb() can release it */
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	/* payload begins right after the per-frame header area */
	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	err = -EFAULT;
	page = virt_to_page(data);
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb,
				nr_frags,
				page++, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
/*
 * tpacket_snd - transmit path for a socket with a mapped TX ring
 * (PACKET_TX_RING).  Walks the ring, turning each user frame marked
 * TP_STATUS_SEND_REQUEST into an skb and queueing it on the device.
 * Returns the total number of payload bytes queued, or a negative errno.
 * Serialized against ring reconfiguration by po->pg_vec_lock.
 */
static int tpacket_snd ( struct packet_sock * po , struct msghdr * msg )
{
struct socket * sock ;
struct sk_buff * skb ;
struct net_device * dev ;
__be16 proto ;
int ifindex , err , reserve = 0 ;
2009-07-22 01:57:59 +04:00
void * ph ;
struct sockaddr_ll * saddr = ( struct sockaddr_ll * ) msg - > msg_name ;
2009-05-19 09:11:22 +04:00
int tp_len , size_max ;
unsigned char * addr ;
int len_sum = 0 ;
int status = 0 ;
sock = po - > sk . sk_socket ;
mutex_lock ( & po - > pg_vec_lock ) ;
err = - EBUSY ;
/* No destination given: use the ifindex/protocol the socket is bound to. */
if ( saddr = = NULL ) {
ifindex = po - > ifindex ;
proto = po - > num ;
addr = NULL ;
} else {
err = - EINVAL ;
if ( msg - > msg_namelen < sizeof ( struct sockaddr_ll ) )
goto out ;
/* The hardware address must fit inside the supplied sockaddr_ll. */
if ( msg - > msg_namelen < ( saddr - > sll_halen
+ offsetof ( struct sockaddr_ll ,
sll_addr ) ) )
goto out ;
ifindex = saddr - > sll_ifindex ;
proto = saddr - > sll_protocol ;
addr = saddr - > sll_addr ;
}
dev = dev_get_by_index ( sock_net ( & po - > sk ) , ifindex ) ;
err = - ENXIO ;
if ( unlikely ( dev = = NULL ) )
goto out ;
reserve = dev - > hard_header_len ;
err = - ENETDOWN ;
if ( unlikely ( ! ( dev - > flags & IFF_UP ) ) )
goto out_put ;
/* Largest payload one ring frame can carry, capped at MTU + link header. */
size_max = po - > tx_ring . frame_size
- sizeof ( struct skb_shared_info )
- po - > tp_hdrlen
- LL_ALLOCATED_SPACE ( dev )
- sizeof ( struct sockaddr_ll ) ;
if ( size_max > dev - > mtu + reserve )
size_max = dev - > mtu + reserve ;
do {
ph = packet_current_frame ( po , & po - > tx_ring ,
TP_STATUS_SEND_REQUEST ) ;
/* Nothing ready in the ring: yield the CPU and re-check. */
if ( unlikely ( ph = = NULL ) ) {
schedule ( ) ;
continue ;
}
status = TP_STATUS_SEND_REQUEST ;
skb = sock_alloc_send_skb ( & po - > sk ,
LL_ALLOCATED_SPACE ( dev )
+ sizeof ( struct sockaddr_ll ) ,
0 , & err ) ;
if ( unlikely ( skb = = NULL ) )
goto out_status ;
tp_len = tpacket_fill_skb ( po , skb , ph , dev , size_max , proto ,
addr ) ;
if ( unlikely ( tp_len < 0 ) ) {
/* With PACKET_LOSS set, a malformed frame is skipped, not fatal. */
if ( po - > tp_loss ) {
__packet_set_status ( po , ph ,
TP_STATUS_AVAILABLE ) ;
packet_increment_head ( & po - > tx_ring ) ;
kfree_skb ( skb ) ;
continue ;
} else {
status = TP_STATUS_WRONG_FORMAT ;
err = tp_len ;
goto out_status ;
}
}
/* The destructor flips the frame back to AVAILABLE on tx completion. */
skb - > destructor = tpacket_destruct_skb ;
__packet_set_status ( po , ph , TP_STATUS_SENDING ) ;
atomic_inc ( & po - > tx_ring . pending ) ;
status = TP_STATUS_SEND_REQUEST ;
err = dev_queue_xmit ( skb ) ;
if ( unlikely ( err > 0 & & ( err = net_xmit_errno ( err ) ) ! = 0 ) )
goto out_xmit ;
packet_increment_head ( & po - > tx_ring ) ;
len_sum + = tp_len ;
2009-07-22 01:57:59 +04:00
/* Keep going while frames remain; without MSG_DONTWAIT also wait for
 * in-flight transmissions to drain before returning. */
} while ( likely ( ( ph ! = NULL ) | | ( ( ! ( msg - > msg_flags & MSG_DONTWAIT ) )
2009-05-19 09:11:22 +04:00
& & ( atomic_read ( & po - > tx_ring . pending ) ) ) )
) ;
err = len_sum ;
goto out_put ;
out_xmit :
/* xmit failed: detach the ring destructor before freeing the skb. */
skb - > destructor = sock_wfree ;
atomic_dec ( & po - > tx_ring . pending ) ;
out_status :
__packet_set_status ( po , ph , status ) ;
kfree_skb ( skb ) ;
out_put :
dev_put ( dev ) ;
out :
mutex_unlock ( & po - > pg_vec_lock ) ;
return err ;
}
# endif
/*
 * packet_snd - ordinary (non-ring) sendmsg path for PF_PACKET sockets.
 * Builds one skb from the user iovec and queues it on the device.
 * Returns the number of bytes sent, or a negative errno.
 */
static int packet_snd ( struct socket * sock ,
2005-04-17 02:20:36 +04:00
struct msghdr * msg , size_t len )
{
struct sock * sk = sock - > sk ;
2009-07-22 01:57:59 +04:00
struct sockaddr_ll * saddr = ( struct sockaddr_ll * ) msg - > msg_name ;
2005-04-17 02:20:36 +04:00
struct sk_buff * skb ;
struct net_device * dev ;
2006-11-08 11:26:29 +03:00
__be16 proto ;
2005-04-17 02:20:36 +04:00
unsigned char * addr ;
int ifindex , err , reserve = 0 ;
/*
2007-02-09 17:25:10 +03:00
* Get and verify the address .
2005-04-17 02:20:36 +04:00
*/
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
if ( saddr = = NULL ) {
/* Unnamed send: fall back to the socket's bound device/protocol. */
struct packet_sock * po = pkt_sk ( sk ) ;
ifindex = po - > ifindex ;
proto = po - > num ;
addr = NULL ;
} else {
err = - EINVAL ;
if ( msg - > msg_namelen < sizeof ( struct sockaddr_ll ) )
goto out ;
2005-09-21 11:11:37 +04:00
if ( msg - > msg_namelen < ( saddr - > sll_halen + offsetof ( struct sockaddr_ll , sll_addr ) ) )
goto out ;
2005-04-17 02:20:36 +04:00
ifindex = saddr - > sll_ifindex ;
proto = saddr - > sll_protocol ;
addr = saddr - > sll_addr ;
}
2008-03-25 20:26:21 +03:00
dev = dev_get_by_index ( sock_net ( sk ) , ifindex ) ;
2005-04-17 02:20:36 +04:00
err = - ENXIO ;
if ( dev = = NULL )
goto out_unlock ;
/* SOCK_RAW callers supply the link header themselves; budget for it. */
if ( sock - > type = = SOCK_RAW )
reserve = dev - > hard_header_len ;
2007-01-26 06:30:36 +03:00
err = - ENETDOWN ;
if ( ! ( dev - > flags & IFF_UP ) )
goto out_unlock ;
2005-04-17 02:20:36 +04:00
err = - EMSGSIZE ;
if ( len > dev - > mtu + reserve )
goto out_unlock ;
2008-05-13 07:48:31 +04:00
skb = sock_alloc_send_skb ( sk , len + LL_ALLOCATED_SPACE ( dev ) ,
2005-04-17 02:20:36 +04:00
msg - > msg_flags & MSG_DONTWAIT , & err ) ;
2009-07-22 01:57:59 +04:00
if ( skb = = NULL )
2005-04-17 02:20:36 +04:00
goto out_unlock ;
skb_reserve ( skb , LL_RESERVED_SPACE ( dev ) ) ;
2007-04-11 07:45:18 +04:00
skb_reset_network_header ( skb ) ;
2005-04-17 02:20:36 +04:00
2007-10-09 12:36:32 +04:00
/* SOCK_DGRAM: the kernel builds the link-level header for the caller. */
err = - EINVAL ;
if ( sock - > type = = SOCK_DGRAM & &
dev_hard_header ( skb , dev , ntohs ( proto ) , addr , NULL , len ) < 0 )
goto out_free ;
2005-04-17 02:20:36 +04:00
/* Returns -EFAULT on error */
2009-07-22 01:57:59 +04:00
err = memcpy_fromiovec ( skb_put ( skb , len ) , msg - > msg_iov , len ) ;
2005-04-17 02:20:36 +04:00
if ( err )
goto out_free ;
skb - > protocol = proto ;
skb - > dev = dev ;
skb - > priority = sk - > sk_priority ;
/*
* Now send it
*/
err = dev_queue_xmit ( skb ) ;
if ( err > 0 & & ( err = net_xmit_errno ( err ) ) ! = 0 )
goto out_unlock ;
dev_put ( dev ) ;
2009-07-22 01:57:59 +04:00
return len ;
2005-04-17 02:20:36 +04:00
out_free :
kfree_skb ( skb ) ;
out_unlock :
if ( dev )
dev_put ( dev ) ;
out :
return err ;
}
2009-05-19 09:11:22 +04:00
/*
 * sendmsg() entry point: route to the TX-ring fast path when one is
 * mapped, otherwise fall back to the regular packet_snd() path.
 */
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
#ifdef CONFIG_PACKET_MMAP
	struct packet_sock *po = pkt_sk(sock->sk);

	/* A mapped transmit ring takes precedence over the plain path. */
	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
#endif
	return packet_snd(sock, msg, len);
}
2005-04-17 02:20:36 +04:00
/*
* Close a PACKET socket . This is fairly simple . We immediately go
* to ' closed ' state and remove our protocol entry in the device list .
*/
/*
 * packet_release - close a PACKET socket: unlink it from the per-netns
 * socket list, unhook the protocol handler, tear down any mapped rings,
 * flush multicast memberships and drop the final reference.
 */
static int packet_release ( struct socket * sock )
{
struct sock * sk = sock - > sk ;
struct packet_sock * po ;
2007-11-20 09:28:35 +03:00
struct net * net ;
2009-05-19 09:11:22 +04:00
# ifdef CONFIG_PACKET_MMAP
struct tpacket_req req ;
# endif
2005-04-17 02:20:36 +04:00
if ( ! sk )
return 0 ;
2008-03-25 20:26:21 +03:00
net = sock_net ( sk ) ;
2005-04-17 02:20:36 +04:00
po = pkt_sk ( sk ) ;
2007-12-11 15:19:54 +03:00
/* Remove the socket from the netns list before any teardown. */
write_lock_bh ( & net - > packet . sklist_lock ) ;
2005-04-17 02:20:36 +04:00
sk_del_node_init ( sk ) ;
2008-11-24 11:09:29 +03:00
sock_prot_inuse_add ( net , sk - > sk_prot , - 1 ) ;
2007-12-11 15:19:54 +03:00
write_unlock_bh ( & net - > packet . sklist_lock ) ;
2005-04-17 02:20:36 +04:00
/*
* Unhook packet receive handler .
*/
if ( po - > running ) {
/*
* Remove the protocol hook
*/
dev_remove_pack ( & po - > prot_hook ) ;
po - > running = 0 ;
po - > num = 0 ;
/* Drop the reference the active hook held on the socket. */
__sock_put ( sk ) ;
}
packet_flush_mclist ( sk ) ;
# ifdef CONFIG_PACKET_MMAP
2009-05-19 09:11:22 +04:00
/* A zeroed request with closing=1 releases a mapped ring. */
memset ( & req , 0 , sizeof ( req ) ) ;
if ( po - > rx_ring . pg_vec )
packet_set_ring ( sk , & req , 1 , 0 ) ;
if ( po - > tx_ring . pg_vec )
packet_set_ring ( sk , & req , 1 , 1 ) ;
2005-04-17 02:20:36 +04:00
# endif
/*
* Now the socket is dead . No more input will appear .
*/
sock_orphan ( sk ) ;
sock - > sk = NULL ;
/* Purge queues */
skb_queue_purge ( & sk - > sk_receive_queue ) ;
2007-11-11 08:38:48 +03:00
sk_refcnt_debug_release ( sk ) ;
2005-04-17 02:20:36 +04:00
sock_put ( sk ) ;
return 0 ;
}
/*
* Attach a packet hook .
*/
2006-11-08 11:26:29 +03:00
/*
 * packet_do_bind - attach the socket's protocol hook to @dev / @protocol.
 * Any existing hook is detached first.  A protocol of 0 leaves the
 * socket unbound.  If the device is present but down, the hook is not
 * installed and the socket is flagged with ENETDOWN instead.
 */
static int packet_do_bind ( struct sock * sk , struct net_device * dev , __be16 protocol )
2005-04-17 02:20:36 +04:00
{
struct packet_sock * po = pkt_sk ( sk ) ;
/*
* Detach an existing hook if present .
*/
lock_sock ( sk ) ;
spin_lock ( & po - > bind_lock ) ;
if ( po - > running ) {
__sock_put ( sk ) ;
po - > running = 0 ;
po - > num = 0 ;
/* dev_remove_pack() can sleep/synchronize: drop the spinlock around it. */
spin_unlock ( & po - > bind_lock ) ;
dev_remove_pack ( & po - > prot_hook ) ;
spin_lock ( & po - > bind_lock ) ;
}
po - > num = protocol ;
po - > prot_hook . type = protocol ;
po - > prot_hook . dev = dev ;
po - > ifindex = dev ? dev - > ifindex : 0 ;
if ( protocol = = 0 )
goto out_unlock ;
2007-11-13 08:05:20 +03:00
if ( ! dev | | ( dev - > flags & IFF_UP ) ) {
2005-04-17 02:20:36 +04:00
dev_add_pack ( & po - > prot_hook ) ;
/* The live hook holds a reference on the socket. */
sock_hold ( sk ) ;
po - > running = 1 ;
2007-11-13 08:05:20 +03:00
} else {
sk - > sk_err = ENETDOWN ;
if ( ! sock_flag ( sk , SOCK_DEAD ) )
sk - > sk_error_report ( sk ) ;
2005-04-17 02:20:36 +04:00
}
out_unlock :
spin_unlock ( & po - > bind_lock ) ;
release_sock ( sk ) ;
return 0 ;
}
/*
* Bind a packet socket to a device
*/
2009-07-22 01:57:59 +04:00
static int packet_bind_spkt ( struct socket * sock , struct sockaddr * uaddr ,
int addr_len )
2005-04-17 02:20:36 +04:00
{
2009-07-22 01:57:59 +04:00
struct sock * sk = sock - > sk ;
2005-04-17 02:20:36 +04:00
char name [ 15 ] ;
struct net_device * dev ;
int err = - ENODEV ;
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
/*
* Check legality
*/
2007-02-09 17:25:10 +03:00
2006-01-24 03:28:02 +03:00
if ( addr_len ! = sizeof ( struct sockaddr ) )
2005-04-17 02:20:36 +04:00
return - EINVAL ;
2009-07-22 01:57:59 +04:00
strlcpy ( name , uaddr - > sa_data , sizeof ( name ) ) ;
2005-04-17 02:20:36 +04:00
2008-03-25 20:26:21 +03:00
dev = dev_get_by_name ( sock_net ( sk ) , name ) ;
2005-04-17 02:20:36 +04:00
if ( dev ) {
err = packet_do_bind ( sk , dev , pkt_sk ( sk ) - > num ) ;
dev_put ( dev ) ;
}
return err ;
}
static int packet_bind ( struct socket * sock , struct sockaddr * uaddr , int addr_len )
{
2009-07-22 01:57:59 +04:00
struct sockaddr_ll * sll = ( struct sockaddr_ll * ) uaddr ;
struct sock * sk = sock - > sk ;
2005-04-17 02:20:36 +04:00
struct net_device * dev = NULL ;
int err ;
/*
* Check legality
*/
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
if ( addr_len < sizeof ( struct sockaddr_ll ) )
return - EINVAL ;
if ( sll - > sll_family ! = AF_PACKET )
return - EINVAL ;
if ( sll - > sll_ifindex ) {
err = - ENODEV ;
2008-03-25 20:26:21 +03:00
dev = dev_get_by_index ( sock_net ( sk ) , sll - > sll_ifindex ) ;
2005-04-17 02:20:36 +04:00
if ( dev = = NULL )
goto out ;
}
err = packet_do_bind ( sk , dev , sll - > sll_protocol ? : pkt_sk ( sk ) - > num ) ;
if ( dev )
dev_put ( dev ) ;
out :
return err ;
}
/* Protocol descriptor: every PF_PACKET sock embeds a struct packet_sock. */
static struct proto packet_proto = {
. name = " PACKET " ,
. owner = THIS_MODULE ,
. obj_size = sizeof ( struct packet_sock ) ,
} ;
/*
2007-02-09 17:25:10 +03:00
* Create a packet of type SOCK_PACKET .
2005-04-17 02:20:36 +04:00
*/
2007-10-09 10:24:22 +04:00
/*
 * packet_create - socket(AF_PACKET, ...) backend.  Allocates and
 * initializes a packet_sock, installs the receive hook if a protocol
 * was requested, and links the socket into the per-netns list.
 * Requires CAP_NET_RAW.  Returns 0 or a negative errno.
 */
static int packet_create ( struct net * net , struct socket * sock , int protocol )
2005-04-17 02:20:36 +04:00
{
struct sock * sk ;
struct packet_sock * po ;
2006-11-08 11:26:29 +03:00
__be16 proto = ( __force __be16 ) protocol ; /* weird, but documented */
2005-04-17 02:20:36 +04:00
int err ;
if ( ! capable ( CAP_NET_RAW ) )
return - EPERM ;
2007-05-30 00:16:31 +04:00
if ( sock - > type ! = SOCK_DGRAM & & sock - > type ! = SOCK_RAW & &
sock - > type ! = SOCK_PACKET )
2005-04-17 02:20:36 +04:00
return - ESOCKTNOSUPPORT ;
sock - > state = SS_UNCONNECTED ;
err = - ENOBUFS ;
2007-11-01 10:39:31 +03:00
sk = sk_alloc ( net , PF_PACKET , GFP_KERNEL , & packet_proto ) ;
2005-04-17 02:20:36 +04:00
if ( sk = = NULL )
goto out ;
/* Legacy SOCK_PACKET sockets use their own (older) ops table. */
sock - > ops = & packet_ops ;
if ( sock - > type = = SOCK_PACKET )
sock - > ops = & packet_ops_spkt ;
2007-05-30 00:16:31 +04:00
2005-04-17 02:20:36 +04:00
sock_init_data ( sock , sk ) ;
po = pkt_sk ( sk ) ;
sk - > sk_family = PF_PACKET ;
2006-11-08 11:26:29 +03:00
po - > num = proto ;
2005-04-17 02:20:36 +04:00
sk - > sk_destruct = packet_sock_destruct ;
2007-11-11 08:38:48 +03:00
sk_refcnt_debug_inc ( sk ) ;
2005-04-17 02:20:36 +04:00
/*
* Attach a protocol block
*/
spin_lock_init ( & po - > bind_lock ) ;
2009-01-31 01:12:06 +03:00
mutex_init ( & po - > pg_vec_lock ) ;
2005-04-17 02:20:36 +04:00
po - > prot_hook . func = packet_rcv ;
2007-05-30 00:16:31 +04:00
2005-04-17 02:20:36 +04:00
if ( sock - > type = = SOCK_PACKET )
po - > prot_hook . func = packet_rcv_spkt ;
2007-05-30 00:16:31 +04:00
2005-04-17 02:20:36 +04:00
po - > prot_hook . af_packet_priv = sk ;
2006-11-08 11:26:29 +03:00
/* A non-zero protocol starts capturing right away (running=1 holds a ref). */
if ( proto ) {
po - > prot_hook . type = proto ;
2005-04-17 02:20:36 +04:00
dev_add_pack ( & po - > prot_hook ) ;
sock_hold ( sk ) ;
po - > running = 1 ;
}
2007-12-11 15:19:54 +03:00
write_lock_bh ( & net - > packet . sklist_lock ) ;
sk_add_node ( sk , & net - > packet . sklist ) ;
2008-11-20 01:25:35 +03:00
sock_prot_inuse_add ( net , & packet_proto , 1 ) ;
2008-11-24 11:09:29 +03:00
write_unlock_bh ( & net - > packet . sklist_lock ) ;
2009-07-22 01:57:59 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
out :
return err ;
}
/*
* Pull a packet from our receive queue and hand it to the user .
* If necessary we block .
*/
/*
 * Pull a packet from our receive queue and hand it to the user.
 * If necessary we block.
 */
static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *sll;
	struct sk_buff *skb;
	int copied, err;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK | MSG_DONTWAIT | MSG_TRUNC | MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 * Let the generic datagram receiver handle the races,
	 * re-entrancy and blocking; it returns ENETDOWN when the
	 * device has just gone down.
	 */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	/*
	 * Report the address length stashed at receive time so the
	 * caller knows how much of msg_name is meaningful.
	 */
	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen +
				   offsetof(struct sockaddr_ll, sll_addr);

	/*
	 * Data beyond the caller's buffer is silently dropped; MSG_TRUNC
	 * tells the caller it happened.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	/* PACKET_AUXDATA: deliver tpacket-style metadata as a cmsg. */
	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		aux.tp_vlan_tci = skb->vlan_tci;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/* MSG_TRUNC asks for the full on-wire length, not what we copied. */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
static int packet_getname_spkt ( struct socket * sock , struct sockaddr * uaddr ,
int * uaddr_len , int peer )
{
struct net_device * dev ;
struct sock * sk = sock - > sk ;
if ( peer )
return - EOPNOTSUPP ;
uaddr - > sa_family = AF_PACKET ;
2008-03-25 20:26:21 +03:00
dev = dev_get_by_index ( sock_net ( sk ) , pkt_sk ( sk ) - > ifindex ) ;
2005-04-17 02:20:36 +04:00
if ( dev ) {
strlcpy ( uaddr - > sa_data , dev - > name , 15 ) ;
dev_put ( dev ) ;
} else
memset ( uaddr - > sa_data , 0 , 14 ) ;
* uaddr_len = sizeof ( * uaddr ) ;
return 0 ;
}
static int packet_getname ( struct socket * sock , struct sockaddr * uaddr ,
int * uaddr_len , int peer )
{
struct net_device * dev ;
struct sock * sk = sock - > sk ;
struct packet_sock * po = pkt_sk ( sk ) ;
2009-07-22 01:57:59 +04:00
struct sockaddr_ll * sll = ( struct sockaddr_ll * ) uaddr ;
2005-04-17 02:20:36 +04:00
if ( peer )
return - EOPNOTSUPP ;
sll - > sll_family = AF_PACKET ;
sll - > sll_ifindex = po - > ifindex ;
sll - > sll_protocol = po - > num ;
2008-03-25 20:26:21 +03:00
dev = dev_get_by_index ( sock_net ( sk ) , po - > ifindex ) ;
2005-04-17 02:20:36 +04:00
if ( dev ) {
sll - > sll_hatype = dev - > type ;
sll - > sll_halen = dev - > addr_len ;
memcpy ( sll - > sll_addr , dev - > dev_addr , dev - > addr_len ) ;
dev_put ( dev ) ;
} else {
sll - > sll_hatype = 0 ; /* Bad: we have no ARPHRD_UNSPEC */
sll - > sll_halen = 0 ;
}
2005-09-21 11:11:37 +04:00
* uaddr_len = offsetof ( struct sockaddr_ll , sll_addr ) + sll - > sll_halen ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2008-07-15 07:49:46 +04:00
static int packet_dev_mc ( struct net_device * dev , struct packet_mclist * i ,
int what )
2005-04-17 02:20:36 +04:00
{
switch ( i - > type ) {
case PACKET_MR_MULTICAST :
if ( what > 0 )
2009-05-19 22:27:17 +04:00
return dev_mc_add ( dev , i - > addr , i - > alen , 0 ) ;
2005-04-17 02:20:36 +04:00
else
2009-05-19 22:27:17 +04:00
return dev_mc_delete ( dev , i - > addr , i - > alen , 0 ) ;
2005-04-17 02:20:36 +04:00
break ;
case PACKET_MR_PROMISC :
2008-07-15 07:49:46 +04:00
return dev_set_promiscuity ( dev , what ) ;
2005-04-17 02:20:36 +04:00
break ;
case PACKET_MR_ALLMULTI :
2008-07-15 07:49:46 +04:00
return dev_set_allmulti ( dev , what ) ;
2005-04-17 02:20:36 +04:00
break ;
2009-05-19 22:27:17 +04:00
case PACKET_MR_UNICAST :
if ( what > 0 )
2009-05-23 03:22:17 +04:00
return dev_unicast_add ( dev , i - > addr ) ;
2009-05-19 22:27:17 +04:00
else
2009-05-23 03:22:17 +04:00
return dev_unicast_delete ( dev , i - > addr ) ;
2009-05-19 22:27:17 +04:00
break ;
2009-07-22 01:57:59 +04:00
default :
break ;
2005-04-17 02:20:36 +04:00
}
2008-07-15 07:49:46 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
static void packet_dev_mclist ( struct net_device * dev , struct packet_mclist * i , int what )
{
2009-07-22 01:57:59 +04:00
for ( ; i ; i = i - > next ) {
2005-04-17 02:20:36 +04:00
if ( i - > ifindex = = dev - > ifindex )
packet_dev_mc ( dev , i , what ) ;
}
}
2005-09-21 11:11:37 +04:00
static int packet_mc_add ( struct sock * sk , struct packet_mreq_max * mreq )
2005-04-17 02:20:36 +04:00
{
struct packet_sock * po = pkt_sk ( sk ) ;
struct packet_mclist * ml , * i ;
struct net_device * dev ;
int err ;
rtnl_lock ( ) ;
err = - ENODEV ;
2008-03-25 20:26:21 +03:00
dev = __dev_get_by_index ( sock_net ( sk ) , mreq - > mr_ifindex ) ;
2005-04-17 02:20:36 +04:00
if ( ! dev )
goto done ;
err = - EINVAL ;
if ( mreq - > mr_alen > dev - > addr_len )
goto done ;
err = - ENOBUFS ;
2006-01-12 02:56:43 +03:00
i = kmalloc ( sizeof ( * i ) , GFP_KERNEL ) ;
2005-04-17 02:20:36 +04:00
if ( i = = NULL )
goto done ;
err = 0 ;
for ( ml = po - > mclist ; ml ; ml = ml - > next ) {
if ( ml - > ifindex = = mreq - > mr_ifindex & &
ml - > type = = mreq - > mr_type & &
ml - > alen = = mreq - > mr_alen & &
memcmp ( ml - > addr , mreq - > mr_address , ml - > alen ) = = 0 ) {
ml - > count + + ;
/* Free the new element ... */
kfree ( i ) ;
goto done ;
}
}
i - > type = mreq - > mr_type ;
i - > ifindex = mreq - > mr_ifindex ;
i - > alen = mreq - > mr_alen ;
memcpy ( i - > addr , mreq - > mr_address , i - > alen ) ;
i - > count = 1 ;
i - > next = po - > mclist ;
po - > mclist = i ;
2008-07-15 07:49:46 +04:00
err = packet_dev_mc ( dev , i , 1 ) ;
if ( err ) {
po - > mclist = i - > next ;
kfree ( i ) ;
}
2005-04-17 02:20:36 +04:00
done :
rtnl_unlock ( ) ;
return err ;
}
2005-09-21 11:11:37 +04:00
static int packet_mc_drop ( struct sock * sk , struct packet_mreq_max * mreq )
2005-04-17 02:20:36 +04:00
{
struct packet_mclist * ml , * * mlp ;
rtnl_lock ( ) ;
for ( mlp = & pkt_sk ( sk ) - > mclist ; ( ml = * mlp ) ! = NULL ; mlp = & ml - > next ) {
if ( ml - > ifindex = = mreq - > mr_ifindex & &
ml - > type = = mreq - > mr_type & &
ml - > alen = = mreq - > mr_alen & &
memcmp ( ml - > addr , mreq - > mr_address , ml - > alen ) = = 0 ) {
if ( - - ml - > count = = 0 ) {
struct net_device * dev ;
* mlp = ml - > next ;
2008-03-25 20:26:21 +03:00
dev = dev_get_by_index ( sock_net ( sk ) , ml - > ifindex ) ;
2005-04-17 02:20:36 +04:00
if ( dev ) {
packet_dev_mc ( dev , ml , - 1 ) ;
dev_put ( dev ) ;
}
kfree ( ml ) ;
}
rtnl_unlock ( ) ;
return 0 ;
}
}
rtnl_unlock ( ) ;
return - EADDRNOTAVAIL ;
}
static void packet_flush_mclist ( struct sock * sk )
{
struct packet_sock * po = pkt_sk ( sk ) ;
struct packet_mclist * ml ;
if ( ! po - > mclist )
return ;
rtnl_lock ( ) ;
while ( ( ml = po - > mclist ) ! = NULL ) {
struct net_device * dev ;
po - > mclist = ml - > next ;
2009-07-22 01:57:59 +04:00
dev = dev_get_by_index ( sock_net ( sk ) , ml - > ifindex ) ;
if ( dev ! = NULL ) {
2005-04-17 02:20:36 +04:00
packet_dev_mc ( dev , ml , - 1 ) ;
dev_put ( dev ) ;
}
kfree ( ml ) ;
}
rtnl_unlock ( ) ;
}
static int
2009-10-01 03:12:20 +04:00
packet_setsockopt ( struct socket * sock , int level , int optname , char __user * optval , unsigned int optlen )
2005-04-17 02:20:36 +04:00
{
struct sock * sk = sock - > sk ;
2007-02-05 10:31:32 +03:00
struct packet_sock * po = pkt_sk ( sk ) ;
2005-04-17 02:20:36 +04:00
int ret ;
if ( level ! = SOL_PACKET )
return - ENOPROTOOPT ;
2009-05-19 09:11:22 +04:00
switch ( optname ) {
2007-02-09 17:25:10 +03:00
case PACKET_ADD_MEMBERSHIP :
2005-04-17 02:20:36 +04:00
case PACKET_DROP_MEMBERSHIP :
{
2005-09-21 11:11:37 +04:00
struct packet_mreq_max mreq ;
int len = optlen ;
memset ( & mreq , 0 , sizeof ( mreq ) ) ;
if ( len < sizeof ( struct packet_mreq ) )
2005-04-17 02:20:36 +04:00
return - EINVAL ;
2005-09-21 11:11:37 +04:00
if ( len > sizeof ( mreq ) )
len = sizeof ( mreq ) ;
2009-07-22 01:57:59 +04:00
if ( copy_from_user ( & mreq , optval , len ) )
2005-04-17 02:20:36 +04:00
return - EFAULT ;
2005-09-21 11:11:37 +04:00
if ( len < ( mreq . mr_alen + offsetof ( struct packet_mreq , mr_address ) ) )
return - EINVAL ;
2005-04-17 02:20:36 +04:00
if ( optname = = PACKET_ADD_MEMBERSHIP )
ret = packet_mc_add ( sk , & mreq ) ;
else
ret = packet_mc_drop ( sk , & mreq ) ;
return ret ;
}
2007-05-30 00:12:50 +04:00
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_PACKET_MMAP
case PACKET_RX_RING :
2009-05-19 09:11:22 +04:00
case PACKET_TX_RING :
2005-04-17 02:20:36 +04:00
{
struct tpacket_req req ;
2009-07-22 01:57:59 +04:00
if ( optlen < sizeof ( req ) )
2005-04-17 02:20:36 +04:00
return - EINVAL ;
2009-07-22 01:57:59 +04:00
if ( copy_from_user ( & req , optval , sizeof ( req ) ) )
2005-04-17 02:20:36 +04:00
return - EFAULT ;
2009-05-19 09:11:22 +04:00
return packet_set_ring ( sk , & req , 0 , optname = = PACKET_TX_RING ) ;
2005-04-17 02:20:36 +04:00
}
case PACKET_COPY_THRESH :
{
int val ;
2009-07-22 01:57:59 +04:00
if ( optlen ! = sizeof ( val ) )
2005-04-17 02:20:36 +04:00
return - EINVAL ;
2009-07-22 01:57:59 +04:00
if ( copy_from_user ( & val , optval , sizeof ( val ) ) )
2005-04-17 02:20:36 +04:00
return - EFAULT ;
pkt_sk ( sk ) - > copy_thresh = val ;
return 0 ;
}
2008-07-15 09:50:15 +04:00
case PACKET_VERSION :
{
int val ;
if ( optlen ! = sizeof ( val ) )
return - EINVAL ;
2009-05-19 09:11:22 +04:00
if ( po - > rx_ring . pg_vec | | po - > tx_ring . pg_vec )
2008-07-15 09:50:15 +04:00
return - EBUSY ;
if ( copy_from_user ( & val , optval , sizeof ( val ) ) )
return - EFAULT ;
switch ( val ) {
case TPACKET_V1 :
case TPACKET_V2 :
po - > tp_version = val ;
return 0 ;
default :
return - EINVAL ;
}
}
2008-07-19 05:05:19 +04:00
case PACKET_RESERVE :
{
unsigned int val ;
if ( optlen ! = sizeof ( val ) )
return - EINVAL ;
2009-05-19 09:11:22 +04:00
if ( po - > rx_ring . pg_vec | | po - > tx_ring . pg_vec )
2008-07-19 05:05:19 +04:00
return - EBUSY ;
if ( copy_from_user ( & val , optval , sizeof ( val ) ) )
return - EFAULT ;
po - > tp_reserve = val ;
return 0 ;
}
2009-05-19 09:11:22 +04:00
case PACKET_LOSS :
{
unsigned int val ;
if ( optlen ! = sizeof ( val ) )
return - EINVAL ;
if ( po - > rx_ring . pg_vec | | po - > tx_ring . pg_vec )
return - EBUSY ;
if ( copy_from_user ( & val , optval , sizeof ( val ) ) )
return - EFAULT ;
po - > tp_loss = ! ! val ;
return 0 ;
}
2005-04-17 02:20:36 +04:00
# endif
2007-02-05 10:31:32 +03:00
case PACKET_AUXDATA :
{
int val ;
if ( optlen < sizeof ( val ) )
return - EINVAL ;
if ( copy_from_user ( & val , optval , sizeof ( val ) ) )
return - EFAULT ;
po - > auxdata = ! ! val ;
return 0 ;
}
2007-04-21 03:05:39 +04:00
case PACKET_ORIGDEV :
{
int val ;
if ( optlen < sizeof ( val ) )
return - EINVAL ;
if ( copy_from_user ( & val , optval , sizeof ( val ) ) )
return - EFAULT ;
po - > origdev = ! ! val ;
return 0 ;
}
2005-04-17 02:20:36 +04:00
default :
return - ENOPROTOOPT ;
}
}
/*
 * getsockopt(SOL_PACKET) handler.  Each option points `data' at the
 * value to return and clamps `len'; the common tail copies it out.
 */
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct tpacket_stats st;
	void *data;
	int len;
	int val;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		/* Snapshot and reset the counters atomically wrt receive. */
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;
		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;
		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;
		data = &val;
		break;
#ifdef CONFIG_PACKET_MMAP
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		/* The caller passes in the version it wants the length for. */
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
#endif
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
/*
 * packet_notifier - netdevice event handler.  Detaches the receive hook
 * of every packet socket bound to a device going down/unregistering,
 * and re-attaches it when the device comes back up.
 */
static int packet_notifier ( struct notifier_block * this , unsigned long msg , void * data )
{
struct sock * sk ;
struct hlist_node * node ;
2007-02-21 10:19:54 +03:00
struct net_device * dev = data ;
2008-03-25 15:47:49 +03:00
struct net * net = dev_net ( dev ) ;
2005-04-17 02:20:36 +04:00
2007-12-11 15:19:54 +03:00
read_lock ( & net - > packet . sklist_lock ) ;
sk_for_each ( sk , node , & net - > packet . sklist ) {
2005-04-17 02:20:36 +04:00
struct packet_sock * po = pkt_sk ( sk ) ;
switch ( msg ) {
case NETDEV_UNREGISTER :
/* Device is going away: drop its multicast memberships too. */
if ( po - > mclist )
packet_dev_mclist ( dev , po - > mclist , - 1 ) ;
2007-05-30 00:12:50 +04:00
/* fallthrough */
2005-04-17 02:20:36 +04:00
case NETDEV_DOWN :
if ( dev - > ifindex = = po - > ifindex ) {
spin_lock ( & po - > bind_lock ) ;
if ( po - > running ) {
__dev_remove_pack ( & po - > prot_hook ) ;
__sock_put ( sk ) ;
po - > running = 0 ;
sk - > sk_err = ENETDOWN ;
if ( ! sock_flag ( sk , SOCK_DEAD ) )
sk - > sk_error_report ( sk ) ;
}
/* Unregister also severs the binding; DOWN keeps it for NETDEV_UP. */
if ( msg = = NETDEV_UNREGISTER ) {
po - > ifindex = - 1 ;
po - > prot_hook . dev = NULL ;
}
spin_unlock ( & po - > bind_lock ) ;
}
break ;
case NETDEV_UP :
spin_lock ( & po - > bind_lock ) ;
if ( dev - > ifindex = = po - > ifindex & & po - > num & &
! po - > running ) {
dev_add_pack ( & po - > prot_hook ) ;
sock_hold ( sk ) ;
po - > running = 1 ;
}
spin_unlock ( & po - > bind_lock ) ;
break ;
}
}
2007-12-11 15:19:54 +03:00
read_unlock ( & net - > packet . sklist_lock ) ;
2005-04-17 02:20:36 +04:00
return NOTIFY_DONE ;
}
static int packet_ioctl ( struct socket * sock , unsigned int cmd ,
unsigned long arg )
{
struct sock * sk = sock - > sk ;
2009-05-19 09:11:22 +04:00
switch ( cmd ) {
2009-07-22 01:57:59 +04:00
case SIOCOUTQ :
{
int amount = sk_wmem_alloc_get ( sk ) ;
2009-06-18 06:05:41 +04:00
2009-07-22 01:57:59 +04:00
return put_user ( amount , ( int __user * ) arg ) ;
}
case SIOCINQ :
{
struct sk_buff * skb ;
int amount = 0 ;
spin_lock_bh ( & sk - > sk_receive_queue . lock ) ;
skb = skb_peek ( & sk - > sk_receive_queue ) ;
if ( skb )
amount = skb - > len ;
spin_unlock_bh ( & sk - > sk_receive_queue . lock ) ;
return put_user ( amount , ( int __user * ) arg ) ;
}
case SIOCGSTAMP :
return sock_get_timestamp ( sk , ( struct timeval __user * ) arg ) ;
case SIOCGSTAMPNS :
return sock_get_timestampns ( sk , ( struct timespec __user * ) arg ) ;
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_INET
2009-07-22 01:57:59 +04:00
case SIOCADDRT :
case SIOCDELRT :
case SIOCDARP :
case SIOCGARP :
case SIOCSARP :
case SIOCGIFADDR :
case SIOCSIFADDR :
case SIOCGIFBRDADDR :
case SIOCSIFBRDADDR :
case SIOCGIFNETMASK :
case SIOCSIFNETMASK :
case SIOCGIFDSTADDR :
case SIOCSIFDSTADDR :
case SIOCSIFFLAGS :
if ( ! net_eq ( sock_net ( sk ) , & init_net ) )
return - ENOIOCTLCMD ;
return inet_dgram_ops . ioctl ( sock , cmd , arg ) ;
2005-04-17 02:20:36 +04:00
# endif
2009-07-22 01:57:59 +04:00
default :
return - ENOIOCTLCMD ;
2005-04-17 02:20:36 +04:00
}
return 0 ;
}
# ifndef CONFIG_PACKET_MMAP
# define packet_mmap sock_no_mmap
# define packet_poll datagram_poll
# else
2009-07-22 01:57:59 +04:00
/*
 * packet_poll - datagram_poll() plus ring awareness: readable when an
 * RX ring frame is ready for user space, writable when a TX ring frame
 * is available for the application to fill.
 */
static unsigned int packet_poll ( struct file * file , struct socket * sock ,
2005-04-17 02:20:36 +04:00
poll_table * wait )
{
struct sock * sk = sock - > sk ;
struct packet_sock * po = pkt_sk ( sk ) ;
unsigned int mask = datagram_poll ( file , sock , wait ) ;
spin_lock_bh ( & sk - > sk_receive_queue . lock ) ;
2009-05-19 09:11:22 +04:00
if ( po - > rx_ring . pg_vec ) {
/* A previous frame no longer owned by the kernel means data is ready. */
if ( ! packet_previous_frame ( po , & po - > rx_ring , TP_STATUS_KERNEL ) )
2005-04-17 02:20:36 +04:00
mask | = POLLIN | POLLRDNORM ;
}
spin_unlock_bh ( & sk - > sk_receive_queue . lock ) ;
2009-05-19 09:11:22 +04:00
spin_lock_bh ( & sk - > sk_write_queue . lock ) ;
if ( po - > tx_ring . pg_vec ) {
if ( packet_current_frame ( po , & po - > tx_ring , TP_STATUS_AVAILABLE ) )
mask | = POLLOUT | POLLWRNORM ;
}
spin_unlock_bh ( & sk - > sk_write_queue . lock ) ;
2005-04-17 02:20:36 +04:00
return mask ;
}
/* Dirty? Well, I still did not learn better way to account
* for user mmaps .
*/
static void packet_mm_open ( struct vm_area_struct * vma )
{
struct file * file = vma - > vm_file ;
2009-07-22 01:57:59 +04:00
struct socket * sock = file - > private_data ;
2005-04-17 02:20:36 +04:00
struct sock * sk = sock - > sk ;
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
if ( sk )
atomic_inc ( & pkt_sk ( sk ) - > mapped ) ;
}
static void packet_mm_close ( struct vm_area_struct * vma )
{
struct file * file = vma - > vm_file ;
2009-07-22 01:57:59 +04:00
struct socket * sock = file - > private_data ;
2005-04-17 02:20:36 +04:00
struct sock * sk = sock - > sk ;
2007-02-09 17:25:10 +03:00
2005-04-17 02:20:36 +04:00
if ( sk )
atomic_dec ( & pkt_sk ( sk ) - > mapped ) ;
}
static struct vm_operations_struct packet_mmap_ops = {
2009-07-22 01:57:59 +04:00
. open = packet_mm_open ,
. close = packet_mm_close ,
2005-04-17 02:20:36 +04:00
} ;
2005-12-07 03:38:35 +03:00
/* Release every allocated ring block, then the block-pointer array
 * itself.  NULL entries (partially built vector) are skipped. */
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		if (pg_vec[i])
			free_pages((unsigned long) pg_vec[i], order);

	kfree(pg_vec);
}
2005-12-07 03:38:35 +03:00
static inline char * alloc_one_pg_vec_page ( unsigned long order )
{
2009-04-15 14:39:52 +04:00
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN ;
return ( char * ) __get_free_pages ( gfp_flags , order ) ;
2005-12-07 03:38:35 +03:00
}
static char * * alloc_pg_vec ( struct tpacket_req * req , int order )
{
unsigned int block_nr = req - > tp_block_nr ;
char * * pg_vec ;
int i ;
pg_vec = kzalloc ( block_nr * sizeof ( char * ) , GFP_KERNEL ) ;
if ( unlikely ( ! pg_vec ) )
goto out ;
for ( i = 0 ; i < block_nr ; i + + ) {
pg_vec [ i ] = alloc_one_pg_vec_page ( order ) ;
if ( unlikely ( ! pg_vec [ i ] ) )
goto out_free_pgvec ;
}
out :
return pg_vec ;
out_free_pgvec :
free_pg_vec ( pg_vec , order , block_nr ) ;
pg_vec = NULL ;
goto out ;
}
2005-04-17 02:20:36 +04:00
2009-05-19 09:11:22 +04:00
/*
 * Install or tear down one packet ring (RX or TX) for this socket.
 *
 * @sk:      the packet socket
 * @req:     user-supplied ring geometry (block size/count, frame
 *           size/count); tp_block_nr == 0 means "destroy the ring"
 * @closing: non-zero when called from socket release — skips the
 *           mapped/pending busy checks
 * @tx_ring: selects po->tx_ring + sk_write_queue vs. po->rx_ring +
 *           sk_receive_queue
 *
 * Returns 0 on success or a negative errno: -EBUSY while the ring is
 * mmap'ed, has TX frames in flight, or already exists; -EINVAL for
 * inconsistent geometry; -ENOMEM on allocation failure.
 */
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	/* Refuse to touch a ring that is mmap'ed or has pending TX
	 * frames, unless the socket is being torn down anyway. */
	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		/* Frame header length depends on the negotiated
		 * TPACKET version. */
		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		/* Validate user geometry: positive, page-aligned block
		 * size; frames aligned to TPACKET_ALIGNMENT and large
		 * enough for the header plus reserved headroom; frames
		 * must tile the blocks exactly. */
		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	}
	/* Done */
	else {
		/* Destroy request: a non-zero frame count with zero
		 * blocks is inconsistent. */
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	/* Wait until no packet-receive path can still see the hook
	 * before swapping the ring underneath it. */
	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		/* XC(): exchange — stores b into a, returns a's old value. */
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
		spin_lock_bh(&rb_queue->lock);
		pg_vec = XC(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		order = XC(rb->pg_vec_order, order);
		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		/* Use the ring receive path only while an RX ring exists. */
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
#undef XC
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	/* Re-attach the protocol hook if the socket was bound before. */
	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	/* After the XC swap, pg_vec holds the *previous* ring (if any). */
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
2009-05-19 09:11:22 +04:00
/*
 * mmap() handler: map the RX ring followed by the TX ring into one
 * contiguous user VMA.  The mapping must start at file offset 0 and
 * its length must equal the combined size of all configured rings;
 * otherwise -EINVAL is returned.
 */
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* Serialize against packet_set_ring() swapping the rings. */
	mutex_lock(&po->pg_vec_lock);

	/* Sum the sizes of both rings.  rx_ring and tx_ring are
	 * adjacent members of packet_sock, so iterate by address. */
	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	/* Insert every page of every block of both rings, in order:
	 * all RX pages first, then all TX pages. */
	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page = virt_to_page(rb->pg_vec[i]);
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages;
					pg_num++, page++) {
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
# endif
2005-12-22 23:49:22 +03:00
/* proto_ops for legacy SOCK_PACKET sockets: no ring/mmap support,
 * no socket options, spkt-specific bind/getname/sendmsg. */
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
2005-12-22 23:49:22 +03:00
/* proto_ops for SOCK_RAW/SOCK_DGRAM packet sockets: full feature set
 * including socket options, ring-aware poll and mmap. */
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
/* PF_PACKET address-family registration; packet_create() instantiates
 * both legacy SOCK_PACKET and SOCK_RAW/SOCK_DGRAM sockets. */
static struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};
/* Netdevice event notifier: packet_notifier() reacts to interfaces
 * going up/down/unregistering on behalf of bound packet sockets. */
static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};
# ifdef CONFIG_PROC_FS
2007-11-20 09:28:35 +03:00
/* Return the off'th socket on this namespace's packet socket list,
 * or NULL when the list is shorter than that.  Caller holds the
 * sklist read lock. */
static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &net->packet.sklist) {
		if (off == 0)
			return s;
		off--;
	}
	return NULL;
}
/* seq_file start: takes the per-namespace sklist read lock (held
 * until packet_seq_stop) and positions the iterator; *pos == 0 yields
 * SEQ_START_TOKEN so packet_seq_show() emits the header row. */
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq_file_net(seq)->packet.sklist_lock)
{
	struct net *net = seq_file_net(seq);

	read_lock(&net->packet.sklist_lock);
	return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
}
/* seq_file next: advance *pos; from the start token move to the list
 * head, otherwise to the following socket. */
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	++*pos;
	if (v == SEQ_START_TOKEN)
		return sk_head(&net->packet.sklist);
	return sk_next((struct sock *)v);
}
static void packet_seq_stop ( struct seq_file * seq , void * v )
2008-01-08 09:39:57 +03:00
__releases ( seq_file_net ( seq ) - > packet . sklist_lock )
2005-04-17 02:20:36 +04:00
{
2007-12-17 01:04:02 +03:00
struct net * net = seq_file_net ( seq ) ;
2007-12-11 15:19:54 +03:00
read_unlock ( & net - > packet . sklist_lock ) ;
2005-04-17 02:20:36 +04:00
}
2007-02-09 17:25:10 +03:00
/* seq_file show: one header row, then one line per packet socket with
 * its refcount, type, protocol, bound ifindex, running flag, receive
 * memory, owning uid and inode.
 * NOTE(review): the format/header strings below were reconstructed
 * from the whitespace-mangled source — confirm exact column padding
 * against the original file. */
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}
2007-07-11 10:07:31 +04:00
/* Iterator callbacks for /proc/net/packet. */
static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
/* open() for /proc/net/packet: namespace-aware seq_file setup. */
static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}
2007-02-12 11:55:36 +03:00
/* file_operations for /proc/net/packet. */
static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
# endif
2007-11-20 09:28:35 +03:00
static int packet_net_init ( struct net * net )
{
2007-12-11 15:19:54 +03:00
rwlock_init ( & net - > packet . sklist_lock ) ;
INIT_HLIST_HEAD ( & net - > packet . sklist ) ;
2007-11-20 09:28:35 +03:00
if ( ! proc_net_fops_create ( net , " packet " , 0 , & packet_seq_fops ) )
return - ENOMEM ;
return 0 ;
}
static void packet_net_exit ( struct net * net )
{
proc_net_remove ( net , " packet " ) ;
}
/* Per-network-namespace lifecycle hooks for PF_PACKET. */
static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};
2005-04-17 02:20:36 +04:00
/* Module unload: unregister everything in reverse order of
 * packet_init() registration. */
static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}
/*
 * Module init: register the protocol, the address family, the
 * per-namespace ops and the netdevice notifier.
 *
 * The original code ignored the return values of sock_register(),
 * register_pernet_subsys() and register_netdevice_notifier(); any of
 * them failing would leave the module half-initialized.  Check each
 * step and unwind already-completed registrations on failure, using
 * the standard goto-cleanup pattern (mirror image of packet_exit()).
 */
static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	rc = sock_register(&packet_family_ops);
	if (rc != 0)
		goto out_proto;

	rc = register_pernet_subsys(&packet_net_ops);
	if (rc != 0)
		goto out_sock;

	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc != 0)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}
/* Module entry/exit points and metadata. */
module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);