/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows to assign priority to each class in hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves have ALWAYS level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have level
    one less than their parent.
*/
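
/* Illustrative example (added comment, not from the original source): a
 * two-level hierarchy built with the tc tool,
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 500kbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 500kbit ceil 1mbit
 *
 * gives the leaves 1:10 and 1:20 level 0, while the root class 1:1 gets
 * level TC_HTB_MAXDEPTH-1; unclassified traffic goes to the default
 * leaf 1:20.
 */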

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
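
/* Illustrative usage (added comment, assumes the standard module sysfs
 * layout): the parameter can be flipped at runtime via
 *	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */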

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	struct Qdisc_class_common common;
	/* general class parameters */
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	int prio;		/* these two are used only by leaves... */
	int quantum;		/* but stored for parent-to-leaf return */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lost ptr value and start from the
			   first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
	struct work_struct work;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * then finish and return direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

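/* Illustrative note (added, not from the original source): the
 * skb->priority shortcut above lets userspace pick a class without any
 * filters, e.g. for class 1:10 of qdisc 1: (classid 0x00010010):
 *
 *	int prio = 0x00010010;
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 */
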
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

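/* Illustrative walk-through (added comment): ffz(~mask) is the lowest set
 * bit of mask, so for mask == 0x5 the loop above runs twice - first with
 * prio 0, then with prio 2 - linking the class into the prio-0 and prio-2
 * id-trees of its row.
 */
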
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so that
				   reset bit in mask as parent is already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				   parent feed - forget the pointer but remember
				   classid */
				p->un.inner.last_ptr_id[prio] = cl->common.classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline long htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

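/* Worked example (added, illustrative): with hysteresis off, both water
 * marks are 0. If cl->ctokens + *diff == -5, the ceil bucket is empty, so
 * the class becomes HTB_CANT_SEND and *diff is set to 5, the time until
 * the ceil bucket refills to zero. Were ctokens positive but tokens
 * negative, the class could still borrow (HTB_MAY_BORROW).
 */
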
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			cl->qstats.drops++;
		}
		return ret;
	} else {
		cl->bstats.packets +=
			skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
		cl->bstats.bytes += qdisc_pkt_len(skb);
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (long) qdisc_l2t(cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (long) qdisc_l2t(cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

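/* Accounting sketch (added comment): tokens are kept in psched time units.
 * "diff" credits the idle time since the last checkpoint, capped at the
 * bucket depth, and qdisc_l2t() debits the transmission time of "bytes" at
 * the class/ceil rate - e.g. a 1250-byte packet at 1 Mbit/s costs 10 ms
 * worth of ticks.
 */
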
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	long diff;

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets += skb_is_gso(skb) ?
					skb_shinfo(skb)->gso_segs : 1;
		}
		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level,
				   unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	   1 to simplify things when jiffy is going to be incremented
	   too soon */
	unsigned long stop_at = start + 2;
	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		printk(KERN_WARNING "htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) +
				 prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr :
					  q->ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

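/* DRR example (added, illustrative): with cl->quantum == 1500 and
 * 1000-byte packets, a leaf's deficit drops by 1000 per dequeue and the
 * round-robin pointer advances only once the deficit goes negative, so
 * quantum bounds how many bytes a leaf sends before its siblings at the
 * same prio get their turn.
 */
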
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();
	start_at = jiffies;

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	__netif_schedule(qdisc_root(sch));
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_INIT + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_INIT] == NULL) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

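/* r2q example (added, illustrative): with the default r2q of 10, a class
 * with rate 1mbit (125000 B/s) gets quantum = 125000 / 10 = 12500 bytes
 * per DRR round unless "quantum" is given explicitly; see the checks in
 * htb_change_class().
 */
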
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	spin_lock_bh(root_lock);

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	spin_lock_bh(root_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					     &pfifo_qdisc_ops,
					     cl->common.classid))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		*old = cl->un.leaf.q;
		cl->un.leaf.q = new;
		if (*old != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
	   unbind_filter on it (without Oops). */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why don't we allow to delete subtree? references? does
	// tc subsys guarantee us that in htb_destroy it holds no class
	// refs so that we can remove children safely there?
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

2006-08-11 10:35:16 +04:00
static int htb_change_class ( struct Qdisc * sch , u32 classid ,
2008-01-23 09:11:17 +03:00
u32 parentid , struct nlattr * * tca ,
2006-08-11 10:35:16 +04:00
unsigned long * arg )
2005-04-17 02:20:36 +04:00
{
int err = - EINVAL ;
struct htb_sched * q = qdisc_priv ( sch ) ;
2006-08-11 10:35:16 +04:00
struct htb_class * cl = ( struct htb_class * ) * arg , * parent ;
2008-01-23 09:11:17 +03:00
struct nlattr * opt = tca [ TCA_OPTIONS ] ;
2005-04-17 02:20:36 +04:00
struct qdisc_rate_table * rtab = NULL , * ctab = NULL ;
2008-01-23 09:11:17 +03:00
struct nlattr * tb [ TCA_HTB_RTAB + 1 ] ;
2005-04-17 02:20:36 +04:00
struct tc_htb_opt * hopt ;
/* extract all subattrs from opt attr */
2008-01-24 07:33:32 +03:00
if ( ! opt )
goto failure ;
2008-01-24 07:35:39 +03:00
err = nla_parse_nested ( tb , TCA_HTB_RTAB , opt , htb_policy ) ;
2008-01-24 07:33:32 +03:00
if ( err < 0 )
goto failure ;
err = - EINVAL ;
2008-01-24 07:35:39 +03:00
if ( tb [ TCA_HTB_PARMS ] = = NULL )
2005-04-17 02:20:36 +04:00
goto failure ;
2006-08-11 10:35:16 +04:00
parent = parentid = = TC_H_ROOT ? NULL : htb_find ( parentid , sch ) ;
2008-01-23 09:11:17 +03:00
hopt = nla_data ( tb [ TCA_HTB_PARMS ] ) ;
2006-08-11 10:31:08 +04:00
2008-01-23 09:11:17 +03:00
rtab = qdisc_get_rtab ( & hopt - > rate , tb [ TCA_HTB_RTAB ] ) ;
ctab = qdisc_get_rtab ( & hopt - > ceil , tb [ TCA_HTB_CTAB ] ) ;
2006-08-11 10:35:16 +04:00
if ( ! rtab | | ! ctab )
goto failure ;
2005-04-17 02:20:36 +04:00
2006-08-11 10:35:16 +04:00
if ( ! cl ) { /* new class */
2005-04-17 02:20:36 +04:00
struct Qdisc * new_q ;
2006-08-11 10:36:01 +04:00
int prio ;
2007-07-03 09:48:13 +04:00
struct {
2008-01-23 09:11:17 +03:00
struct nlattr nla ;
2007-07-03 09:48:13 +04:00
struct gnet_estimator opt ;
} est = {
2008-01-23 09:11:17 +03:00
. nla = {
. nla_len = nla_attr_size ( sizeof ( est . opt ) ) ,
. nla_type = TCA_RATE ,
2007-07-03 09:48:13 +04:00
} ,
. opt = {
/* 4s interval, 16s averaging constant */
. interval = 2 ,
. ewma_log = 2 ,
} ,
} ;
2006-08-11 10:36:01 +04:00
2005-04-17 02:20:36 +04:00
/* check for valid classid */
2006-08-11 10:35:16 +04:00
if ( ! classid | | TC_H_MAJ ( classid ^ sch - > handle )
| | htb_find ( classid , sch ) )
2005-04-17 02:20:36 +04:00
goto failure ;
/* check maximal depth */
if ( parent & & parent - > parent & & parent - > parent - > level < 2 ) {
printk ( KERN_ERR " htb: tree is too deep \n " ) ;
goto failure ;
}
err = - ENOBUFS ;
2006-07-22 01:51:30 +04:00
if ( ( cl = kzalloc ( sizeof ( * cl ) , GFP_KERNEL ) ) = = NULL )
2005-04-17 02:20:36 +04:00
goto failure ;
2006-08-11 10:35:16 +04:00
2008-11-26 08:13:31 +03:00
err = gen_new_estimator ( & cl - > bstats , & cl - > rate_est ,
qdisc_root_sleeping_lock ( sch ) ,
tca [ TCA_RATE ] ? : & est . nla ) ;
if ( err ) {
kfree ( cl ) ;
goto failure ;
}
2005-04-17 02:20:36 +04:00
cl - > refcnt = 1 ;
2008-07-06 10:22:53 +04:00
cl - > children = 0 ;
2005-04-17 02:20:36 +04:00
INIT_LIST_HEAD ( & cl - > un . leaf . drop_list ) ;
2006-08-11 10:36:01 +04:00
RB_CLEAR_NODE ( & cl - > pq_node ) ;
for ( prio = 0 ; prio < TC_HTB_NUMPRIO ; prio + + )
RB_CLEAR_NODE ( & cl - > node [ prio ] ) ;
2005-04-17 02:20:36 +04:00
		/* create the leaf qdisc early because it uses
		 * kmalloc(GFP_KERNEL), which can't be called while holding
		 * sch_tree_lock() -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
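			/* The parent has been a leaf until now, so its queue
			 * must be flushed; qdisc_tree_decrease_qlen() below
			 * keeps the ancestors' qlen counters consistent. */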
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	/* There used to be a nasty bug here: we must check that the node
	   is really a leaf before touching cl->un.leaf! */
	if (!cl->level) {
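		/* quantum is the number of bytes a leaf may dequeue per DRR
		 * round; unless set explicitly it is derived from the class
		 * rate via the r2q divisor and clamped to sane bounds. */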
		cl->quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}
	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
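
/* Return the head of the filter list that tc filters attach to: the
 * per-class list when arg denotes a class, otherwise the qdisc-wide one. */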
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be here to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get
	   the class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained, it is OK - bind_filter is just
	   another way to "lock" the class - unlike "get", this lock can
	   be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

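	/* Walker protocol: skip the first arg->skip classes, invoke
	 * arg->fn() for each remaining class, and stop early if the
	 * callback returns a negative value. */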
	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		= htb_graft,
	.leaf		= htb_leaf,
	.qlen_notify	= htb_qlen_notify,
	.get		= htb_get,
	.put		= htb_put,
	.change		= htb_change_class,
	.delete		= htb_delete,
	.walk		= htb_walk,
	.tcf_chain	= htb_find_tcf,
	.bind_tcf	= htb_bind_filter,
	.unbind_tcf	= htb_unbind_filter,
	.dump		= htb_dump_class,
	.dump_stats	= htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &htb_class_ops,
	.id		= "htb",
	.priv_size	= sizeof(struct htb_sched),
	.enqueue	= htb_enqueue,
	.dequeue	= htb_dequeue,
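	/* HTB has no native peek; the generic helper emulates it by
	 * dequeuing one skb and caching it until the next dequeue. */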
	.peek		= qdisc_peek_dequeued,
	.drop		= htb_drop,
	.init		= htb_init,
	.reset		= htb_reset,
	.destroy	= htb_destroy,
	.change		= NULL /* htb_change */,
	.dump		= htb_dump,
	.owner		= THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");