#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
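
/*
 * Illustrative sketch (not part of the kernel API): the shape of a walk
 * over classes driven by a qdisc_walker, roughly what a classful qdisc's
 * ->walk() class operation does.  "nr_classes" and the function name are
 * hypothetical.
 */
static inline void example_walk_classes(struct Qdisc *sch,
					struct qdisc_walker *w,
					unsigned int nr_classes)
{
	unsigned int i;

	if (w->stop)
		return;
	for (i = 0; i < nr_classes; i++) {
		/* skip entries already reported by an earlier, resumed walk */
		if (w->count < w->skip) {
			w->count++;
			continue;
		}
		/* class handles here are 1-based; 0 means "no class" */
		if (w->fn(sch, i + 1, w) < 0) {
			w->stop = 1;
			break;
		}
		w->count++;
	}
}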

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
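
/*
 * Illustrative sketch (hypothetical names): a qdisc's private state lives
 * directly behind struct Qdisc, at the next QDISC_ALIGNTO boundary, and is
 * sized by the ->priv_size member of its Qdisc_ops.  qdisc_priv() returns
 * a pointer to that area.
 */
struct example_sched_data {		/* hypothetical private area */
	u32	limit;
	u32	backlog;
};

static inline struct example_sched_data *example_priv(struct Qdisc *sch)
{
	return qdisc_priv(sch);
}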

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   Normal IP packet size ~ 512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we may use an artificial
   clock, evaluated by integration of the network data flow,
   in the most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}
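
/*
 * Illustrative note: with PSCHED_SHIFT == 6 a psched tick is 64 ns, so
 * PSCHED_TICKS_PER_SEC evaluates to 1000000000 >> 6 == 15625000.  The
 * helper below is a hypothetical round-up conversion, so a short but
 * non-zero delay never becomes zero ticks.
 */
static inline psched_time_t example_ns_to_ticks_ceil(u64 delay_ns)
{
	return PSCHED_NS2TICKS(delay_ns + (1ULL << PSCHED_SHIFT) - 1);
}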

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires,
				bool throttle);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
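
/*
 * Illustrative sketch (hypothetical, simplified): the usual throttling
 * pattern in a rate-limiting qdisc's ->dequeue().  If the head packet may
 * not be sent yet, arm the watchdog for its release time and return NULL;
 * the watchdog's hrtimer will reschedule the qdisc later.
 */
static inline struct sk_buff *example_dequeue_throttled(struct qdisc_watchdog *wd,
							struct sk_buff *skb,
							u64 time_to_send_ns)
{
	if (skb && time_to_send_ns > ktime_get_ns()) {
		/* too early: sleep until the packet is allowed out */
		qdisc_watchdog_schedule_ns(wd, time_to_send_ns, true);
		return NULL;
	}
	return skb;
}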

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
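
/*
 * Illustrative sketch (hypothetical module, example only): a qdisc
 * implementation registers its Qdisc_ops on module init and removes it on
 * exit.  "example_qdisc_ops" is assumed to be defined by that module, so
 * the fragment is guarded out here.
 */
#if 0
static int __init example_sch_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_sch_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_sch_module_init);
module_exit(example_sch_module_exit);
#endif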

void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_list_add(struct Qdisc *q);
void qdisc_list_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q))
		__qdisc_run(q);
}
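
/*
 * Illustrative sketch (simplified; not the real __dev_xmit_skb()): the
 * transmit path kicks the qdisc under its root lock after enqueueing a
 * packet.  qdisc_run_begin() inside qdisc_run() makes this a no-op when
 * another CPU already owns the qdisc, so only one runner drains it.
 */
static inline void example_kick_qdisc(struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);

	spin_lock(root_lock);
	qdisc_run(q);
	spin_unlock(root_lock);
}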

int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res, bool compat_mode);

static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
	/* We need to take extra care in case the skb came via
	 * vlan accelerated path. In that case, use skb->vlan_proto
	 * as the original vlan header was already stripped.
	 */
	if (skb_vlan_tag_present(skb))
		return skb->vlan_proto;
	return skb->protocol;
}
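
/*
 * Illustrative sketch: a classifier check based on the protocol of the
 * packet as it appeared on the wire.  With tc_skb_protocol(), a frame
 * whose tag was consumed by VLAN acceleration is still reported as
 * 802.1Q/802.1ad rather than leaking its inner protocol.
 */
static inline bool example_skb_is_vlan(const struct sk_buff *skb)
{
	__be16 proto = tc_skb_protocol(skb);

	return proto == htons(ETH_P_8021Q) || proto == htons(ETH_P_8021AD);
}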

/* Calculate the maximal size of a packet seen by the hard_start_xmit
 * routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
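
/*
 * Example (illustrative): for a plain Ethernet device with dev->mtu == 1500
 * and dev->hard_header_len == ETH_HLEN (14), psched_mtu() returns 1514.
 */
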
#endif