/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves always have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
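
/*
 * Illustrative userspace setup (hypothetical device name and rates),
 * showing the shape of the hierarchy this scheduler walks:
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0), 1:1 is an interior node
 * (level 1), and unclassified traffic falls into the default class 1:20.
 */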
#define HTB_HSIZE 16		/* classid hash size */
#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_RATECM 1		/* whether to use rate computer */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

#ifdef HTB_RATECM
	/* rate measurement counters */
	unsigned long rate_bytes, sum_bytes;
	unsigned long rate_packets, sum_packets;
#endif

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lost ptr value and start from the
			   first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	unsigned long pq_key;	/* the same type as jiffies global */

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about non work conserving .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};
/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255) {
		cl->xstats.giants++;
		slot = 255;
	}
	return rate->data[slot];
}
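
/*
 * L2T in words: look up the transmission time of a packet of "size" bytes
 * in the rate table built by the kernel from userspace parameters.
 * Illustrative numbers: with cell_log == 3 a 1500 byte packet maps to
 * slot 1500 >> 3 == 187; anything past slot 255 counts as a giant and
 * is clamped.
 */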
struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

	/* cached value of jiffies in dequeue */
	unsigned long jiffies;

	/* whether we hit non-work conserving class during this dequeue; we use */
	int nwc_hit;		/* this to disable mindelay complaint in dequeue */

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct timer_list timer;	/* send delay timer */
#ifdef HTB_RATECM
	struct timer_list rttim;	/* rate computer timer */
	int recmp_bucket;	/* which hash bucket to recompute next */
#endif

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};
/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
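
/*
 * Worked example: handle 0x00010010 (class 1:10) folds as
 * 0x00010010 ^ 0x00000100 = 0x00010110, then ^ 0x00001011 = 0x00011101,
 * and 0x00011101 & 0xf selects bucket 1.
 */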
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return HTB_DIRECT;
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the class
 * will change its mode at time cl->pq_key (in jiffies; the delay argument is
 * in PSCHED microseconds and converted below). Make sure that the class is
 * not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
	if (cl->pq_key == q->jiffies)
		cl->pq_key++;

	/* update the nearest event cache */
	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (time_after_eq(cl->pq_key, c->pq_key))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}
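
/*
 * Note: ffz(~mask) yields the index of the lowest set bit in mask
 * (find-first-zero of the complement). E.g. mask == 0x6 gives prio 1
 * first; that bit is cleared and the next iteration gives prio 2.
 */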
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so we
				   reset the bit in mask - parent is already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				   parent feed - forget the pointer but remember
				   classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}
#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif
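
/*
 * Hysteresis in words: a class already in HTB_CAN_SEND keeps sending until
 * tokens fall below -buffer, while any other class needs tokens >= 0 to
 * enter it; likewise a sending class is demoted to HTB_CANT_SEND only once
 * ctokens drop below -cbuffer, but a blocked class must climb back to 0 to
 * leave it. This avoids rapid mode flapping around the zero-token boundary.
 */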
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
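
/*
 * Worked example (illustrative numbers, hysteresis off): with
 * cl->ctokens == -5 and *diff == 3, toks == -2 < 0, so the class is
 * HTB_CANT_SEND and *diff becomes 2 - the time remaining until the
 * ceil bucket reaches zero, which the caller feeds to
 * htb_add_to_wait_tree().
 */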
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}
static void htb_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	sch->flags &= ~TCQ_F_THROTTLED;
	wmb();
	netif_schedule(sch->dev);
}
#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
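/*
 * RT_GEN is a simple EWMA step: each tick the running rate R decays by
 * 1/HTB_EWMAC and absorbs the delta D accumulated since the last tick.
 * Illustrative: with HTB_EWMAC == 2 and a steady per-tick delta D,
 * R_next = R/2 + D converges to 2*D.
 */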
static void htb_rate_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	/* lock queue so that we can muck with it */
	spin_lock_bh(&sch->dev->queue_lock);

	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);

	/* scan and recompute one bucket at time */
	if (++q->recmp_bucket >= HTB_HSIZE)
		q->recmp_bucket = 0;

	hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
		RT_GEN(cl->sum_bytes, cl->rate_bytes);
		RT_GEN(cl->sum_packets, cl->rate_packets);
	}
	spin_unlock_bh(&sch->dev->queue_lock);
}
#endif
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, int bytes)
{
	long toks, diff;
	enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1 - cl->mbuffer; \
	cl->T = toks
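
	/*
	 * HTB_ACCNT in words: refill bucket T by the elapsed time diff,
	 * clamp it to its depth B, charge this packet's transmission time
	 * via rate table R, and bound the deficit near -mbuffer.
	 * Illustrative: a full leaf (tokens == buffer) sending a packet
	 * costing t ends up with tokens == buffer - t.
	 */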
	while (cl) {
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}
#ifdef HTB_RATECM
		/* update rate counters */
		cl->sum_bytes += bytes;
		cl->sum_packets++;
#endif

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets++;
		}
		cl = cl->parent;
	}
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns jiffies to
 * next pending event (0 for no event in pq).
 * Note: Applied are events which have cl->pq_key <= jiffies.
 */
static long htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = q->wait_pq[level].rb_node;
		if (!p)
			return 0;
		while (p->rb_left)
			p = p->rb_left;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (time_after(cl->pq_key, q->jiffies)) {
			return cl->pq_key - q->jiffies;
		}
		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return HZ / 10;
}
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}
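
/*
 * The explicit stk[] above emulates recursion: each frame holds one
 * inner class's feed tree root plus its saved ptr/last_ptr_id hints,
 * so the DRR walk resumes where the previous dequeue left off and can
 * recover via htb_id_find_next_upper() when a hint has gone stale.
 */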
/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb->len);
	}
	return skb;
}
static void htb_delay_by(struct Qdisc *sch, long delay)
{
	struct htb_sched *q = qdisc_priv(sch);
	if (delay <= 0)
		delay = 1;
	if (unlikely(delay > 5 * HZ)) {
		if (net_ratelimit())
			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
		delay = 5 * HZ;
	}
	/* why don't we use jiffies here? because expires can be in the past */
	mod_timer(&q->timer, q->jiffies + delay);
	sch->flags |= TCQ_F_THROTTLED;
	sch->qstats.overlimits++;
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	long min_delay;

	q->jiffies = jiffies;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	PSCHED_GET_TIME(q->now);

	min_delay = LONG_MAX;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		long delay;
		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
			delay = htb_do_events(q, level);
			q->near_ev_cache[level] =
			    q->jiffies + (delay ? delay : HZ);
		} else
			delay = q->near_ev_cache[level] - q->jiffies;

		if (delay && min_delay > delay)
			min_delay = delay;
		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
fin:
	return skb;
}
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->timer);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;

	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_HLIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	init_timer(&q->timer);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;
	q->timer.function = htb_timer;
	q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
	init_timer(&q->rttim);
	q->rttim.function = htb_rate_timer;
	q->rttim.data = (unsigned long)sch;
	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);
#endif
	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	spin_lock_bh(&sch->dev->queue_lock);
	gopt.direct_pkts = q->direct_pkts;

	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb->tail - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, skb->tail - skb->data);
	return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_opt opt;

	spin_lock_bh(&sch->dev->queue_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

#ifdef HTB_RATECM
	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
#endif

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
							    &pfifo_qdisc_ops))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			if (cl->prio_activity)
				htb_deactivate(qdisc_priv(sch), cl);

			/* TODO: is it correct? Why doesn't CBQ do it? */
			sch->q.qlen -= (*old)->q.qlen;
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
static void htb_destroy_filters(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		sch->q.qlen -= cl->un.leaf.q->q.qlen;
		qdisc_destroy(cl->un.leaf.q);
	}
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	htb_destroy_filters(&cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	if (!hlist_unhashed(&cl->hlist))
		hlist_del(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}
/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
#ifdef HTB_RATECM
	del_timer_sync(&q->rttim);
#endif
	/* This line used to be after htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
	   unbind_filter on it (without Oops). */
	htb_destroy_filters(&q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsys guarantee us that in htb_destroy it holds no class
	// refs so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	sch_tree_lock(sch);

	/* delete from hash and active; remainder in destroy_class */
	if (!hlist_unhashed(&cl->hlist))
		hlist_del(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		   so that it can't be used inside of sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			sch->q.qlen -= parent->un.leaf.q->q.qlen;
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
		PSCHED_GET_TIME(cl->t_c);
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else
		sch_tree_lock(sch);

	/* it used to be a nasty bug here, we have to check that node
	   is really leaf before changing cl->un.leaf ! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
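
/*
 * quantum/r2q example (illustrative numbers): with tc's default r2q of
 * 10, a class with rate 100kbit (12500 B/s) gets quantum 1250 bytes;
 * rates below 80kbit (10000 B/s) trip the "quantum is small" warning
 * above unless an explicit quantum is configured.
 */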
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least tc_index filter uses this just to get class
	   for other reasons so that we have to allow for it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.get = htb_get,
	.put = htb_put,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_chain = htb_find_tcf,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next = NULL,
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.requeue = htb_requeue,
	.drop = htb_drop,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.change = NULL /* htb_change */,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");