/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better sing. AvgQ mode with Grio (WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *
 *
 *  For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
#define GRED_DEF_PRIO (MAX_DPs / 2)
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
	/* Parameters */
	u32		limit;		/* HARD maximal queue length	*/
	u32		qth_min;	/* Min average length threshold: A scaled */
	u32		qth_max;	/* Max average length threshold: A scaled */
	u32		DP;		/* the drop parameters */
	char		Wlog;		/* log(W)		*/
	char		Plog;		/* random number bits	*/
	u32		Scell_max;
	u32		Rmask;
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u32		forced;		/* packets dropped for exceeding limits */
	u32		early;		/* packets dropped as a warning */
	u32		other;		/* packets dropped by invoking drop() */
	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
	char		Scell_log;
	u8		Stab[256];
	u8		prio;		/* the prio of this vq */

	/* Variables */
	unsigned long	qave;		/* Average queue length: A scaled */
	int		qcount;		/* Packets since last random number generation */
	u32		qR;		/* Cached random number */

	psched_time_t	qidlestart;	/* Start of idle period	*/
};
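/*
 * Fixed-point conventions (mirroring Alexey's sch_red.c): qave is kept
 * scaled by 2^Wlog, and qth_min/qth_max are stored pre-shifted by Wlog
 * (see gred_change_vq()), so the enqueue path can compare them without
 * extra shifts per packet. Stab[] maps idle time (in Scell_log-sized
 * cells) to the number of right shifts needed to decay qave after an
 * idle period.
 */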
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};
struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		DPs;
	u32		def;
	u8		initd;
};
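/*
 * Two coupling modes between virtual queues, tracked in ->flags:
 *
 * RIO mode (set when the user requests "grio"): on enqueue, a VQ adds
 * in the averages of all non-idle VQs with a numerically lower prio,
 * so higher-priority traffic inflates the average seen by lower
 * priorities.
 *
 * WRED mode (enabled on top of RIO when two VQs share the same prio,
 * see gred_wred_mode_check()): all VQs share a single average queue
 * length, kept in the default VQ.
 */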
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be called too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
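/*
 * The enqueue path below follows classic RED (see sch_red.c): the
 * average queue length is an EWMA, avg = avg + W*(qlen - avg) with
 * W = 2^-Wlog, computed on the scaled qave as
 *
 *	q->qave += backlog - (q->qave >> q->Wlog);
 *
 * Between qth_min and qth_max a packet is dropped with a probability
 * proportional to how far the average has climbed past qth_min:
 * qcount packets have been enqueued since the last random number was
 * drawn, and a drop occurs once
 * ((qave - qth_min) >> Wlog) * qcount reaches qR, a cached random
 * number masked to Plog bits by Rmask.
 */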
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	psched_time_t now;
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qave = 0;
	int i = 0;

	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	if (((skb->tc_index & 0xf) > (t->DPs - 1)) ||
	    !(q = t->tab[skb->tc_index & 0xf])) {
		printk("GRED: setting to default (%d)\n", t->def);
		if (!(q = t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping!! "
				"(%d)\n", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & 0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n", skb->tc_index & 0xf, sch->handle,
		 q->backlog, sch->qstats.backlog);

	/* sum up all the qaves of prios <= to ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i = 0; i < t->DPs; i++) {
			if ((!t->tab[i]) || (i == q->DP))
				continue;

			if ((t->tab[i]->prio < q->prio) &&
			    (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
				qave += t->tab[i]->qave;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t)) {
		qave = 0;
		q->qave = t->tab[t->def]->qave;
		q->qidlestart = t->tab[t->def]->qidlestart;
	}

	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
		long us_idle;
		PSCHED_GET_TIME(now);
		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
		PSCHED_SET_PASTPERFECT(q->qidlestart);

		q->qave >>= q->Stab[(us_idle >> q->Scell_log) & 0xFF];
	} else {
		if (gred_wred_mode(t)) {
			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
		} else {
			q->qave += q->backlog - (q->qave >> q->Wlog);
		}
	}

	if (gred_wred_mode(t))
		t->tab[t->def]->qave = q->qave;

	if ((q->qave + qave) < q->qth_min) {
		q->qcount = -1;
enqueue:
		if (q->backlog + skb->len <= q->limit) {
			q->backlog += skb->len;
do_enqueue:
			__skb_queue_tail(&sch->q, skb);
			sch->qstats.backlog += skb->len;
			sch->bstats.bytes += skb->len;
			sch->bstats.packets++;
			return 0;
		} else {
			q->pdrop++;
		}

drop:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}
	if ((q->qave + qave) >= q->qth_max) {
		q->qcount = -1;
		sch->qstats.overlimits++;
		q->forced++;
		goto drop;
	}
	if (++q->qcount) {
		if ((((qave + q->qave) - q->qth_min) >> q->Wlog) * q->qcount < q->qR)
			goto enqueue;
		q->qcount = 0;
		q->qR = net_random() & q->Rmask;
		sch->qstats.overlimits++;
		q->early++;
		goto drop;
	}
	q->qR = net_random() & q->Rmask;
	goto enqueue;
}
static int
gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);
	q = t->tab[(skb->tc_index & 0xf)];
	/* error checking here -- probably unnecessary */
	PSCHED_SET_PASTPERFECT(q->qidlestart);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	q->backlog += skb->len;
	return 0;
}
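/*
 * On the way out, both gred_dequeue() and gred_drop() stamp
 * q->qidlestart once a virtual queue drains (in WRED mode only the
 * shared default VQ is stamped, when the whole queue is empty), so
 * the next enqueue can decay qave by the idle time via Stab[].
 */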
static struct sk_buff *
gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		q = t->tab[(skb->tc_index & 0xf)];
		if (q) {
			q->backlog -= skb->len;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}
		return skb;
	}

	if (gred_wred_mode(t)) {
		q = t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
				 "screwed up\n");
		else
			PSCHED_GET_TIME(q->qidlestart);
	}

	return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;
		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q = t->tab[(skb->tc_index & 0xf)];
		if (q) {
			q->backlog -= len;
			q->other++;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_drop: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}

		kfree_skb(skb);
		return len;
	}

	q = t->tab[t->def];
	if (!q) {
		D2PRINTK("no default VQ set: Results might be screwed up\n");
		return 0;
	}

	PSCHED_GET_TIME(q->qidlestart);
	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	__skb_queue_purge(&sch->q);

	sch->qstats.backlog = 0;

	for (i = 0; i < t->DPs; i++) {
		q = t->tab[i];
		if (!q)
			continue;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		q->qave = 0;
		q->qcount = -1;
		q->backlog = 0;
		q->other = 0;
		q->forced = 0;
		q->pdrop = 0;
		q->early = 0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	table->initd = 0;

	return 0;
}
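/*
 * The table setup above is normally driven from user space; with a
 * contemporary iproute2 the equivalent would be roughly:
 *
 *	# create 4 virtual queues, DP 1 as default, priority coupling on
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 1 grio
 *
 * (illustrative only -- exact option spelling depends on the tc version)
 */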
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->Wlog = ctl->Wlog;
	q->Plog = ctl->Plog;
	q->limit = ctl->limit;
	q->Scell_log = ctl->Scell_log;
	q->Rmask = ctl->Plog < 32 ? ((1 << ctl->Plog) - 1) : ~0UL;
	q->Scell_max = (255 << q->Scell_log);
	q->qth_min = ctl->qth_min << ctl->Wlog;
	q->qth_max = ctl->qth_max << ctl->Wlog;
	q->qave = 0;
	q->backlog = 0;
	q->qcount = -1;
	q->other = 0;
	q->forced = 0;
	q->pdrop = 0;
	q->early = 0;

	PSCHED_SET_PASTPERFECT(q->qidlestart);
	memcpy(q->Stab, stab, 256);

	return 0;
}
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (table->tab[table->def] == NULL) {
		if (gred_rio_mode(table))
			prio = table->tab[ctl->DP]->prio;

		err = gred_change_vq(sch, table->def, ctl, prio, stab);
		if (err < 0)
			goto errout_locked;
	}

	table->initd = 1;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
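/*
 * Per-VQ parameters arrive here via TCA_GRED_PARMS/TCA_GRED_STAB; a
 * plausible user space invocation (illustrative only -- tc derives
 * Wlog, Plog and the Stab table from these values) would be:
 *
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 1 probability 0.02 prio 2
 */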
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->qth_min >> q->Wlog;
		opt.qth_max	= q->qth_max >> q->Wlog;
		opt.Wlog	= q->Wlog;
		opt.Plog	= q->Plog;
		opt.Scell_log	= q->Scell_log;
		opt.other	= q->other;
		opt.early	= q->early;
		opt.forced	= q->forced;
		opt.pdrop	= q->pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (q->qave) {
			if (gred_wred_mode(table)) {
				q->qidlestart = table->tab[table->def]->qidlestart;
				q->qave = table->tab[table->def]->qave;
			}
			if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
				long idle;
				unsigned long qave;
				psched_time_t now;
				PSCHED_GET_TIME(now);
				idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
				qave = q->qave >> q->Stab[(idle >> q->Scell_log) & 0xFF];
				opt.qave = qave >> q->Wlog;
			} else {
				opt.qave = q->qave >> q->Wlog;
			}
		}

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.next		= NULL,
	.cl_ops		= NULL,
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.requeue	= gred_requeue,
	.drop		= gred_drop,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");