/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */
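
/*
 * Illustrative userspace configuration (not part of this file; see
 * tc-red(8) for the authoritative syntax), roughly:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn
 *
 * tc converts min/max/avpkt/burst into the qth_min/qth_max/Wlog/Plog/
 * Scell_log fields of struct tc_red_qopt that red_change() consumes
 * below; the numbers above are arbitrary examples.
 */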

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
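
/*
 * Enqueue: compute the average queue size from the child qdisc's backlog
 * and let red_action() decide.  On a probabilistic or hard mark the packet
 * is ECN-marked when the qdisc is in ECN mode (and, for hard marks, not in
 * harddrop mode); otherwise it is dropped as congestion feedback.  Packets
 * that survive are handed to the child qdisc installed by red_change().
 */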
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
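
/*
 * Dequeue from the child qdisc.  When the child runs empty, start the RED
 * idle period so that the idle time is accounted for the next time the
 * average queue size is computed (see the 990814 fix noted above).
 */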
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
	}

	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}
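
/*
 * Drop one packet from the child qdisc on the parent's behalf; such drops
 * are counted as "other" in the RED statistics.  If the child cannot drop
 * anything, treat the queue as idle.
 */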
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};
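
/*
 * (Re)configure the qdisc from netlink attributes: validate TCA_RED_PARMS
 * and TCA_RED_STAB, build a byte-limited bfifo child when ctl->limit is
 * non-zero, swap it in under the tree lock, and load the RED parameters.
 */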
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}
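
/*
 * Start with the noop qdisc as the child, so anything enqueued before
 * red_change() installs a real child is simply discarded.
 */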
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}
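
/*
 * Dump the current configuration as a nested TCA_OPTIONS/TCA_RED_PARMS
 * attribute; qth_min/qth_max are converted back from their Wlog-scaled
 * internal form.
 */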
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
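
/*
 * Class operations: RED presents a single child class (minor 1) through
 * which the backing qdisc can be grafted, inspected and walked.
 */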
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");