/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_H
#define __NET_SCHED_FQ_H

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct fq_tin;

/**
 * struct fq_flow - per traffic flow queue
 *
 * @tin: owner of this flow. Used to manage collisions, i.e. when a packet
 *	hashes to an index which points to a flow that is already owned by a
 *	different tin the packet is destined to. In such case the implementer
 *	must provide a fallback flow
 * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
 *	(deficit round robin) based round robin queuing similar to the one
 *	found in net/sched/sch_fq_codel.c
 * @queue: sk_buff queue to hold packets
 * @backlog: number of bytes pending in the queue. The number of packets can be
 *	found in @queue.qlen
 * @deficit: used for DRR++
 */
struct fq_flow {
	struct fq_tin *tin;
	struct list_head flowchain;
	struct sk_buff_head queue;
	u32 backlog;
	int deficit;
};
/**
* struct fq_tin - a logical container of fq_flows
*
* Used to group fq_flows into a logical aggregate . DRR + + scheme is used to
* pull interleaved packets out of the associated flows .
*
* @ new_flows : linked list of fq_flow
* @ old_flows : linked list of fq_flow
*/
struct fq_tin {
struct list_head new_flows ;
struct list_head old_flows ;
2020-12-18 21:47:15 +03:00
struct list_head tin_list ;
2020-12-18 21:47:14 +03:00
struct fq_flow default_flow ;
2016-04-22 15:20:13 +03:00
u32 backlog_bytes ;
u32 backlog_packets ;
u32 overlimit ;
u32 collisions ;
u32 flows ;
u32 tx_bytes ;
u32 tx_packets ;
} ;
/**
* struct fq - main container for fair queuing purposes
*
* @ limit : max number of packets that can be queued across all flows
* @ backlog : number of packets queued across all flows
*/
struct fq {
struct fq_flow * flows ;
2020-12-18 21:47:15 +03:00
unsigned long * flows_bitmap ;
struct list_head tin_backlog ;
2016-04-22 15:20:13 +03:00
spinlock_t lock ;
u32 flows_cnt ;
u32 limit ;
2016-09-23 22:59:09 +03:00
u32 memory_limit ;
u32 memory_usage ;
2016-04-22 15:20:13 +03:00
u32 quantum ;
u32 backlog ;
u32 overlimit ;
2016-09-23 22:59:09 +03:00
u32 overmemory ;
2016-04-22 15:20:13 +03:00
u32 collisions ;
} ;
typedef struct sk_buff * fq_tin_dequeue_t ( struct fq * ,
struct fq_tin * ,
struct fq_flow * flow ) ;
typedef void fq_skb_free_t ( struct fq * ,
struct fq_tin * ,
struct fq_flow * ,
struct sk_buff * ) ;
2017-10-06 12:53:32 +03:00
/* Return %true to filter (drop) the frame. */
typedef bool fq_skb_filter_t ( struct fq * ,
struct fq_tin * ,
struct fq_flow * ,
struct sk_buff * ,
void * ) ;
2016-04-22 15:20:13 +03:00
typedef struct fq_flow * fq_flow_get_default_t ( struct fq * ,
struct fq_tin * ,
int idx ,
struct sk_buff * ) ;
#endif /* __NET_SCHED_FQ_H */