/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
	====================================
	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines on top of it. It does not need to do
	 bandwidth control either, since that can be handled by using
	 token bucket or other rate control.

	 The emulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/
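
/* Illustrative usage via the iproute2 "tc" front end (exact option
 * syntax depends on the iproute2 version installed):
 *
 *	# 100ms delay, 10ms jitter with 25% correlation, plus 0.1% loss
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 */
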
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
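/* In effect this computes, in 32-bit fixed point,
 *	next = (1 - rho) * U + rho * last
 * where U is a fresh uniform sample and rho is state->rho / 2^32, so a
 * rho of 0 yields independent samples and values near ~0U strongly
 * favor repeating the previous output.
 */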
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
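/* The table entries are scaled by NETEM_DIST_SCALE, so the result below
 * works out to roughly mu + t * sigma / NETEM_DIST_SCALE; the multiply
 * is split into high and low parts to avoid intermediate overflow, and
 * the +/- NETEM_DIST_SCALE/2 term rounds the low part.
 */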
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert the copy at the
	 * top of the qdisc tree, since the parent queuer expects that
	 * only one skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, do the
	 * checksum now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit in the linear header area */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the
		 * front of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

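/*
 * Dequeue: peek at the head of the inner tfifo; if that packet's
 * time_to_send has arrived, dequeue and return it, otherwise program
 * the watchdog to wake us up at that time.
 */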
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* has the packet's scheduled send time arrived? */
		if (cb->time_to_send <= now) {
			skb = q->qdisc->dequeue(q->qdisc);
			if (!skb)
				return NULL;

			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
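/* Tables of this form are normally generated offline (e.g. by the
 * "maketable" utility shipped with iproute2) and loaded via the
 * TCA_NETEM_DELAY_DIST attribute.
 */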
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(root_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

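/* netem's TCA_OPTIONS payload begins with a fixed struct (here struct
 * tc_netem_qopt); nested attributes follow the aligned struct, so
 * parsing starts at that offset. A payload carrying only the bare
 * struct is still accepted, presumably to keep old userspace working.
 */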
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, reordering must be assumed 100% probable.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR]) {
		ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER]) {
		ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_CORRUPT]) {
		ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in send-time order, based on the time_to_send
 * stamp stored in each skb's cb area.
 */
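/* Enqueue scans from the tail because packets normally arrive already
 * ordered by time_to_send, so insertion is O(1) in the common case.
 */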
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");