#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc
{
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);
	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance sake on SMP, we put highly modified fields at the end
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
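
/* Illustrative sketch (not part of this header): a minimal work-conserving
 * qdisc can be built almost entirely from the inline helpers defined below,
 * in the style of sch_fifo.c.  The "example_" names are hypothetical;
 * registration uses register_qdisc() from net/pkt_sched.h:
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= qdisc_enqueue_tail,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.drop		= qdisc_queue_drop,
 *		.reset		= qdisc_reset_queue,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = register_qdisc(&example_qdisc_ops);
 */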

struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};
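
/* Illustrative sketch: a classful qdisc walks its filter chain with
 * tc_classify() from net/pkt_cls.h, which invokes each tcf_proto's
 * ->classify() in priority order and fills a tcf_result on a match.
 * "filter_list" is a hypothetical field in the qdisc's private data:
 *
 *	struct tcf_result res;
 *
 *	if (tc_classify(skb, priv->filter_list, &res) >= 0)
 *		cl = (struct example_class *)res.class;
 */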

struct qdisc_skb_cb {
	unsigned int	pkt_len;
	char		data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
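
/* Illustrative sketch: a qdisc may keep private per-packet state in the
 * data[] tail of qdisc_skb_cb, as sch_netem does.  The struct and accessor
 * names here are hypothetical:
 *
 *	struct example_skb_cb {
 *		psched_time_t	time_to_send;
 *	};
 *
 *	static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *	{
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */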

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
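
/* Illustrative sketch: a qdisc's ->change() handler already runs under RTNL,
 * so it may take the tree lock while swapping parameters, keeping the
 * datapath out.  "q" is the qdisc being reconfigured; "priv" and "new_limit"
 * are hypothetical:
 *
 *	sch_tree_lock(q);
 *	priv->limit = new_limit;
 *	sch_tree_unlock(q);
 */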

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common
{
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
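
/* Illustrative sketch: a classful qdisc embeds Qdisc_class_common at the
 * start of its class type and resolves a classid via container_of(), as
 * sch_htb does.  The "example_class" type is hypothetical:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	struct Qdisc_class_common *clc;
 *
 *	clc = qdisc_class_find(&priv->clhash, classid);
 *	return clc ? container_of(clc, struct example_class, common) : NULL;
 */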

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
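
/* Illustrative sketch: a classful qdisc forwarding to a child queue checks
 * the extended return code; net_xmit_drop_count() tells whether the parent
 * should account a drop (stolen/bypassed packets are not counted):
 *
 *	ret = qdisc_enqueue(skb, cl->q);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */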

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
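
/* Illustrative sketch: a non-work-conserving qdisc (in the style of sch_tbf)
 * peeks at the head packet and only commits the dequeue once it may actually
 * transmit.  "q->qdisc" is the inner child qdisc and tokens_available() a
 * hypothetical admission test:
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb && tokens_available(q, skb)) {
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 *		...
 *	}
 */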

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
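
/* Worked example (illustrative numbers): with rate.cell_log = 3 and zero
 * cell_align/overhead, a 1000 byte packet indexes slot 1000 >> 3 = 125, so
 * qdisc_l2t() returns rtab->data[125], the precomputed time to transmit
 * 1000 bytes at the configured rate.  Slots above 255 are extrapolated
 * from data[255].
 */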

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif