#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <net/sch_generic.h>

struct qdisc_walker
{
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

extern rwlock_t qdisc_tree_lock;

#define QDISC_ALIGNTO		32
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
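
/*
 * Layout sketch (illustrative; my_sched_data and my_init are
 * hypothetical, not part of this API): a qdisc and its private data
 * are allocated as one block, with the private part starting at the
 * next 32-byte boundary after struct Qdisc, which is exactly what
 * qdisc_priv() returns.  QDISC_ALIGN() rounds up to a multiple of
 * QDISC_ALIGNTO, e.g. QDISC_ALIGN(40) == 64.
 *
 *	struct my_sched_data {
 *		u32	limit;
 *	};
 *
 *	static int my_init(struct Qdisc *sch, struct rtattr *opt)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = 1000;
 *		return 0;
 *	}
 */
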
/*
 * Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.
 *
 * A normal IP packet is ~512 bytes, hence:
 *
 * 0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
 * 10 Mbit ethernet.
 *
 * 10 msec resolution -> < 50 Kbit/sec.
 *
 * The result: a [34]86 is not a good choice for a QoS router :-(
 *
 * Things are not so bad, though, because we may use an artificial
 * clock, evaluated by integrating the network data flow at the most
 * critical places.
 */
typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide by 1000 */
#define PSCHED_US2NS(x)		((s64)(x) << 10)
#define PSCHED_NS2US(x)		((x) >> 10)
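
/*
 * Worked example (comment only): shifting by 10 scales by 1024 rather
 * than 1000, so one psched tick is 1024 ns (~1.024 usec) instead of
 * exactly 1 usec:
 *
 *	PSCHED_US2NS(500)     == 500 << 10     == 512000 ns
 *	PSCHED_NS2US(1000000) == 1000000 >> 10 == 976 ticks
 *
 * The ~2.4% scale error is harmless for scheduling and saves a 64-bit
 * division on every conversion.
 */
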
#define PSCHED_TICKS_PER_SEC	PSCHED_NS2US(NSEC_PER_SEC)
#define PSCHED_GET_TIME(stamp)					\
	((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get())))
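
/*
 * Usage sketch (illustrative): reading the clock and expressing a
 * relative delay in psched ticks.
 *
 *	psched_time_t now;
 *
 *	PSCHED_GET_TIME(now);
 *	// a point 10 msec in the future, in tick units:
 *	// now + PSCHED_TICKS_PER_SEC / 100
 */
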
#define PSCHED_PASTPERFECT	0

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
				    psched_time_t expires);
extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
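
/*
 * Usage sketch (illustrative; all my_* names and fields are
 * hypothetical): a rate-limiting qdisc arms the watchdog when the next
 * packet is not yet eligible to be sent; the hrtimer later restarts
 * the device queue.
 *
 *	static struct sk_buff *my_dequeue(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		psched_time_t now;
 *
 *		PSCHED_GET_TIME(now);
 *		if (q->next_send_time > now) {
 *			qdisc_watchdog_schedule(&q->watchdog,
 *						q->next_send_time);
 *			return NULL;
 *		}
 *		return qdisc_dequeue_head(sch);
 *	}
 *
 * qdisc_watchdog_init() is called from the qdisc's init routine, and
 * qdisc_watchdog_cancel() must run before the qdisc is destroyed so
 * the timer cannot fire on freed state.
 */
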
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;

extern int register_qdisc(struct Qdisc_ops *qops);
extern int unregister_qdisc(struct Qdisc_ops *qops);
extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					       struct rtattr *tab);
extern void qdisc_put_rtab(struct qdisc_rate_table *tab);

extern void __qdisc_run(struct net_device *dev);

/* Kick the device's queue.  The __LINK_STATE_QDISC_RUNNING bit
 * guarantees that only one CPU executes __qdisc_run() for a given
 * device at a time.
 */
static inline void qdisc_run(struct net_device *dev)
{
	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(dev);
}

extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res);

/* Calculate the maximal size of a packet seen by the hard_start_xmit
 * routine of this device.
 */
static inline unsigned psched_mtu(struct net_device *dev)
{
	unsigned mtu = dev->mtu;

	return dev->hard_header ? mtu + dev->hard_header_len : mtu;
}
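
/*
 * Example (comment only): for Ethernet with dev->mtu == 1500 and
 * dev->hard_header_len == 14, psched_mtu() returns 1514, the largest
 * frame the driver's hard_start_xmit can see.  A device without a
 * hard_header routine reports the bare MTU.
 */
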
#endif