#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
};

struct Qdisc
{
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	unsigned long		state;
	struct sk_buff		*gso_skb;
	struct sk_buff_head	q;
	struct netdev_queue	*dev_queue;
	struct list_head	list;

	struct gnet_stats_basic		bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	struct rcu_head			q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int 			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
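
/*
 * Illustrative sketch (hypothetical, not part of this header): a qdisc
 * implementation fills in a struct Qdisc_ops and registers it with the
 * scheduler core via register_qdisc(), e.g.
 *
 *	static struct Qdisc_ops example_qdisc_ops = {
 *		.id		= "example",
 *		.priv_size	= sizeof(struct example_sched_data),
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	register_qdisc(&example_qdisc_ops);
 *
 * where the example_* symbols are placeholders for the implementation.
 */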

struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	return &root->dev_queue->lock;
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}
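
/*
 * Sketch (hypothetical caller, not part of this header): modifications of
 * a qdisc tree are expected to be serialized against the root lock, e.g.
 *
 *	spinlock_t *lock = qdisc_root_lock(q);
 *
 *	spin_lock_bh(lock);
 *	... modify the qdisc tree rooted at q ...
 *	spin_unlock_bh(lock);
 */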

extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

#define sch_tree_lock(q)	qdisc_lock_tree(qdisc_dev(q))
#define sch_tree_unlock(q)	qdisc_unlock_tree(qdisc_dev(q))
#define tcf_tree_lock(tp)	qdisc_lock_tree(qdisc_dev((tp)->q))
#define tcf_tree_unlock(tp)	qdisc_unlock_tree(qdisc_dev((tp)->q))

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common
{
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
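
/*
 * Illustrative sketch (hypothetical, not part of this header): a classful
 * qdisc typically embeds struct Qdisc_class_common in its per-class
 * structure so the generic hash above can be used for classid lookup.
 * "example_class" and "example_class_find" are placeholder names.
 */
struct example_class {
	struct Qdisc_class_common common;
	/* ... per-class scheduling state ... */
};

static inline struct example_class *
example_class_find(struct Qdisc_class_hash *clhash, u32 classid)
{
	struct Qdisc_class_common *cl = qdisc_class_find(clhash, classid);

	/* Recover the containing per-class structure, if the classid exists. */
	return cl ? container_of(cl, struct example_class, common) : NULL;
}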

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}
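
/*
 * Sketch (hypothetical caller): a device teardown or reconfiguration path
 * might combine the helpers above, e.g. waiting for the TX queues to drain
 * before resetting them:
 *
 *	while (!qdisc_all_tx_empty(dev))
 *		yield();
 *	qdisc_reset_all_tx(dev);
 */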

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}
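
/*
 * Minimal sketch of how a FIFO-style qdisc might combine the helpers above
 * (hypothetical function; a real FIFO qdisc would check a configured limit
 * in its private data rather than the device's tx_queue_len).
 */
static inline int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Accept the packet while the queue is below the assumed limit... */
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);

	/* ...otherwise drop it and account for the drop. */
	return qdisc_drop(skb, sch);
}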

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
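
/*
 * Worked example (hypothetical numbers): with cell_log = 3, cell_align = 0
 * and overhead = 0, a 1000-byte packet maps to slot 1000 >> 3 = 125, so its
 * transmission time is rtab->data[125].  A 4096-byte packet maps to slot
 * 512, beyond the 256-entry table, so the result is approximated as
 * rtab->data[255] * (512 >> 8) + rtab->data[512 & 0xFF], i.e.
 * 2 * rtab->data[255] + rtab->data[0].
 */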

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif