/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */
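
/* Example configuration (illustrative only; the device name and the
 * byte values are assumptions, not taken from this file):
 *
 *	tc qdisc add dev eth0 root red limit 60KB min 15KB max 45KB \
 *		avpkt 1000 burst 20 probability 0.02 bandwidth 10Mbit
 *
 * tc translates min/max/limit into the qth_min/qth_max/limit fields of
 * struct tc_red_qopt, which red_change() parses below; adding "ecn"
 * sets TC_RED_ECN in the flags field.
 */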

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;
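
	/* Update the EWMA of the queue length, measured from the child's
	 * byte backlog. The average and the thresholds are kept scaled
	 * up by 2^Wlog (red_dump() shifts them back down for reporting),
	 * and red_calc_qavg() accounts for any idle time before the idle
	 * period is ended below.
	 */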
	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

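	/* Classic RED decision: below qth_min pass the packet through,
	 * between the thresholds mark/drop with a probability that grows
	 * with qavg, above qth_max mark/drop unconditionally. With ECN
	 * enabled, marking replaces dropping for ECN-capable packets,
	 * unless TC_RED_HARDDROP forces a drop at the hard threshold.
	 */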
	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (red_use_harddrop(q) || !red_use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

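	/* A requeued packet means the queue is busy again, so close any
	 * idle period before the next qavg update in red_enqueue().
	 */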
	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

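	/* If the child turns out to be empty, start an idle period so
	 * that red_calc_qavg() can decay qavg for the time the link sat
	 * unused, instead of letting a stale average linger (this is the
	 * 990814 fix noted in the changelog above).
	 */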
	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

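	/* Drops requested from outside (e.g. by a parent qdisc needing
	 * to free space) are delegated to the child and accounted as
	 * "other", distinct from RED's own early and forced drops.
	 */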
	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	qdisc_destroy(q->qdisc);
}

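/* red_policy pins the expected sizes of both netlink attributes: the
 * tc_red_qopt parameter block and the STAB lookup table (RED_STAB_SIZE
 * bytes, defined in net/red.h) used when decaying qavg over idle time.
 */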
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};

static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

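	/* The hard byte limit is enforced by a bfifo child qdisc rather
	 * than by RED itself; a non-zero limit (re)creates the child,
	 * and the old one is swapped out under the tree lock below.
	 */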
	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
				 ctl->Plog, ctl->Scell_log,
				 nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
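	/* Fold the internal counters into the shape user space expects:
	 * "early" covers both probabilistic and forced RED drops, while
	 * "marked" covers the corresponding ECN marks.
	 */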
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (cl != 1)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
	return;
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.change		=	red_change_class,
	.delete		=	red_delete,
	.walk		=	red_walk,
	.tcf_chain	=	red_find_tcf,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");