/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters		*/
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ	*/
	u8		prio;		/* the prio of this vq		*/

	struct red_parms parms;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};
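
/* Two optional operating modes, tracked as bits in gred_sched->flags:
 * in WRED mode all virtual queues (VQs) share a single average queue
 * estimate, while RIO mode ("grio") couples VQs through their
 * priorities when the average is computed at enqueue time.
 */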
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be called too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
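
/* In WRED mode the whole qdisc shares one backlog; otherwise each VQ
 * accounts for its own bytes.
 */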
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
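
/* WRED mode keeps its shared RED state (average queue length and idle
 * timestamp) in table->wred_set; it is loaded into the per-VQ parms
 * before each RED calculation and the updated average is stored back
 * afterwards.
 */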
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}
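
/* Enqueue: map the skb to its VQ via tc_index (falling back to the
 * default DP), update that VQ's RED average, then queue, ECN-mark or
 * drop according to the RED verdict.
 */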
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up the qavgs of all VQs with a lower prio than ours to get
	 * the new qavg */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
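
/* Dequeue from the shared queue head and charge the packet back to the
 * VQ it was accounted to; an emptied VQ (or, in WRED mode, an empty
 * qdisc) starts a RED idle period.
 */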
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
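
/* ->drop: reclaim space by dropping from the tail, crediting the bytes
 * back to the owning VQ and counting the drop under "other".
 */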
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
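
/* Reset: flush the queue and restart RED state on every configured VQ. */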
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
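
/* Apply a tc_gred_sopt: set the number of DPs, the default DP and the
 * RED flags, pick the RIO/WRED operating mode, and destroy any VQs left
 * shadowed by a smaller DP count.
 */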
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
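
/* (Re)configure a single VQ: allocate it on first use, then program its
 * limit, priority and RED parameters from the netlink request.
 */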
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
};
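
/* ->change: a request without TCA_GRED_PARMS/TCA_GRED_STAB updates the
 * table definition; otherwise both attributes are required and exactly
 * one VQ is reconfigured.
 */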
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
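
/* ->init: only a table definition (TCA_GRED_DPS) is accepted at
 * creation time; per-VQ parameters arrive via later change requests.
 */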
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
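
/* Dump the table definition plus one tc_gred_qopt per possible DP;
 * unconfigured DPs are marked by reporting DP >= MAX_DPs.
 */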
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message:
			   this is how we indicate to tc that there is no
			   VQ at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.drop		= gred_drop,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
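
/*
 * Example configuration, a minimal sketch only: the device name and the
 * exact tc(8) option spelling depend on the installed iproute2 version
 * and are assumptions, not part of this file.
 *
 *   # define a table with 4 virtual queues, DP 0 as the default,
 *   # with grio (RIO-style priority) support enabled
 *   tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *
 *   # then program RED parameters for one virtual queue (DP 1)
 *   tc qdisc change dev eth0 root gred DP 1 prio 2 limit 60KB \
 *      min 15KB max 45KB burst 20 avpkt 1000 bandwidth 10Mbit \
 *      probability 0.02
 */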