/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */
2020-12-18 19:47:13 +01:00
static void
__fq_adjust_removal ( struct fq * fq , struct fq_flow * flow , unsigned int packets ,
unsigned int bytes , unsigned int truesize )
{
struct fq_tin * tin = flow - > tin ;
2020-12-18 19:47:15 +01:00
int idx ;
2020-12-18 19:47:13 +01:00
tin - > backlog_bytes - = bytes ;
tin - > backlog_packets - = packets ;
flow - > backlog - = bytes ;
fq - > backlog - = packets ;
fq - > memory_usage - = truesize ;
2020-12-18 19:47:15 +01:00
if ( flow - > backlog )
return ;
if ( flow = = & tin - > default_flow ) {
list_del_init ( & tin - > tin_list ) ;
return ;
}
idx = flow - fq - > flows ;
__clear_bit ( idx , fq - > flows_bitmap ) ;
2020-12-18 19:47:13 +01:00
}
2017-10-06 11:53:32 +02:00
static void fq_adjust_removal ( struct fq * fq ,
struct fq_flow * flow ,
struct sk_buff * skb )
2016-04-22 14:20:13 +02:00
{
2020-12-18 19:47:13 +01:00
__fq_adjust_removal ( fq , flow , 1 , skb - > len , skb - > truesize ) ;
2017-10-06 11:53:32 +02:00
}
static struct sk_buff * fq_flow_dequeue ( struct fq * fq ,
struct fq_flow * flow )
{
struct sk_buff * skb ;
lockdep_assert_held ( & fq - > lock ) ;
skb = __skb_dequeue ( & flow - > queue ) ;
if ( ! skb )
return NULL ;
fq_adjust_removal ( fq , flow , skb ) ;
2016-04-22 14:20:13 +02:00
return skb ;
}
2020-12-18 19:47:13 +01:00
static int fq_flow_drop ( struct fq * fq , struct fq_flow * flow ,
fq_skb_free_t free_func )
{
unsigned int packets = 0 , bytes = 0 , truesize = 0 ;
struct fq_tin * tin = flow - > tin ;
struct sk_buff * skb ;
int pending ;
lockdep_assert_held ( & fq - > lock ) ;
pending = min_t ( int , 32 , skb_queue_len ( & flow - > queue ) / 2 ) ;
do {
skb = __skb_dequeue ( & flow - > queue ) ;
if ( ! skb )
break ;
packets + + ;
bytes + = skb - > len ;
truesize + = skb - > truesize ;
free_func ( fq , tin , flow , skb ) ;
} while ( packets < pending ) ;
__fq_adjust_removal ( fq , flow , packets , bytes , truesize ) ;
return packets ;
}
2016-04-22 14:20:13 +02:00
static struct sk_buff * fq_tin_dequeue ( struct fq * fq ,
struct fq_tin * tin ,
fq_tin_dequeue_t dequeue_func )
{
struct fq_flow * flow ;
struct list_head * head ;
struct sk_buff * skb ;
lockdep_assert_held ( & fq - > lock ) ;
begin :
head = & tin - > new_flows ;
if ( list_empty ( head ) ) {
head = & tin - > old_flows ;
if ( list_empty ( head ) )
return NULL ;
}
flow = list_first_entry ( head , struct fq_flow , flowchain ) ;
if ( flow - > deficit < = 0 ) {
flow - > deficit + = fq - > quantum ;
list_move_tail ( & flow - > flowchain ,
& tin - > old_flows ) ;
goto begin ;
}
skb = dequeue_func ( fq , tin , flow ) ;
if ( ! skb ) {
/* force a pass through old_flows to prevent starvation */
if ( ( head = = & tin - > new_flows ) & &
! list_empty ( & tin - > old_flows ) ) {
list_move_tail ( & flow - > flowchain , & tin - > old_flows ) ;
} else {
list_del_init ( & flow - > flowchain ) ;
flow - > tin = NULL ;
}
goto begin ;
}
flow - > deficit - = skb - > len ;
tin - > tx_bytes + = skb - > len ;
tin - > tx_packets + + ;
return skb ;
}
2019-03-16 18:06:32 +01:00
static u32 fq_flow_idx ( struct fq * fq , struct sk_buff * skb )
{
2020-07-26 15:09:46 +02:00
u32 hash = skb_get_hash ( skb ) ;
2019-03-16 18:06:32 +01:00
return reciprocal_scale ( hash , fq - > flows_cnt ) ;
}
2016-04-22 14:20:13 +02:00
static struct fq_flow * fq_flow_classify ( struct fq * fq ,
2019-03-16 18:06:32 +01:00
struct fq_tin * tin , u32 idx ,
2020-12-18 19:47:14 +01:00
struct sk_buff * skb )
2016-04-22 14:20:13 +02:00
{
struct fq_flow * flow ;
lockdep_assert_held ( & fq - > lock ) ;
flow = & fq - > flows [ idx ] ;
if ( flow - > tin & & flow - > tin ! = tin ) {
2020-12-18 19:47:14 +01:00
flow = & tin - > default_flow ;
2016-04-22 14:20:13 +02:00
tin - > collisions + + ;
fq - > collisions + + ;
}
if ( ! flow - > tin )
tin - > flows + + ;
return flow ;
}
2020-12-18 19:47:15 +01:00
static struct fq_flow * fq_find_fattest_flow ( struct fq * fq )
2016-04-27 12:59:13 +02:00
{
2020-12-18 19:47:15 +01:00
struct fq_tin * tin ;
struct fq_flow * flow = NULL ;
u32 len = 0 ;
int i ;
2016-04-27 12:59:13 +02:00
2020-12-18 19:47:15 +01:00
for_each_set_bit ( i , fq - > flows_bitmap , fq - > flows_cnt ) {
struct fq_flow * cur = & fq - > flows [ i ] ;
unsigned int cur_len ;
2016-04-27 12:59:13 +02:00
2020-12-18 19:47:15 +01:00
cur_len = cur - > backlog ;
if ( cur_len < = len )
continue ;
flow = cur ;
len = cur_len ;
}
list_for_each_entry ( tin , & fq - > tin_backlog , tin_list ) {
unsigned int cur_len = tin - > default_flow . backlog ;
2016-04-27 12:59:13 +02:00
2020-12-18 19:47:15 +01:00
if ( cur_len < = len )
continue ;
flow = & tin - > default_flow ;
len = cur_len ;
}
return flow ;
2016-04-27 12:59:13 +02:00
}
2016-04-22 14:20:13 +02:00
static void fq_tin_enqueue ( struct fq * fq ,
2019-03-16 18:06:32 +01:00
struct fq_tin * tin , u32 idx ,
2016-04-22 14:20:13 +02:00
struct sk_buff * skb ,
2020-12-18 19:47:14 +01:00
fq_skb_free_t free_func )
2016-04-22 14:20:13 +02:00
{
struct fq_flow * flow ;
2022-10-10 11:43:38 +02:00
struct sk_buff * next ;
2017-10-16 17:05:57 +02:00
bool oom ;
2016-04-22 14:20:13 +02:00
lockdep_assert_held ( & fq - > lock ) ;
2020-12-18 19:47:14 +01:00
flow = fq_flow_classify ( fq , tin , idx , skb ) ;
2016-04-22 14:20:13 +02:00
2020-12-18 19:47:15 +01:00
if ( ! flow - > backlog ) {
if ( flow ! = & tin - > default_flow )
__set_bit ( idx , fq - > flows_bitmap ) ;
else if ( list_empty ( & tin - > tin_list ) )
list_add ( & tin - > tin_list , & fq - > tin_backlog ) ;
}
2016-04-22 14:20:13 +02:00
flow - > tin = tin ;
2022-10-10 11:43:38 +02:00
skb_list_walk_safe ( skb , skb , next ) {
skb_mark_not_on_list ( skb ) ;
flow - > backlog + = skb - > len ;
tin - > backlog_bytes + = skb - > len ;
tin - > backlog_packets + + ;
fq - > memory_usage + = skb - > truesize ;
fq - > backlog + + ;
__skb_queue_tail ( & flow - > queue , skb ) ;
}
2016-04-22 14:20:13 +02:00
if ( list_empty ( & flow - > flowchain ) ) {
flow - > deficit = fq - > quantum ;
list_add_tail ( & flow - > flowchain ,
& tin - > new_flows ) ;
}
2017-10-16 17:05:57 +02:00
oom = ( fq - > memory_usage > fq - > memory_limit ) ;
while ( fq - > backlog > fq - > limit | | oom ) {
2020-12-18 19:47:15 +01:00
flow = fq_find_fattest_flow ( fq ) ;
2016-04-22 14:20:13 +02:00
if ( ! flow )
return ;
2020-12-18 19:47:13 +01:00
if ( ! fq_flow_drop ( fq , flow , free_func ) )
2016-04-22 14:20:13 +02:00
return ;
flow - > tin - > overlimit + + ;
fq - > overlimit + + ;
2017-10-16 17:05:57 +02:00
if ( oom ) {
2016-09-23 21:59:09 +02:00
fq - > overmemory + + ;
2017-10-16 17:05:57 +02:00
oom = ( fq - > memory_usage > fq - > memory_limit ) ;
}
2016-04-22 14:20:13 +02:00
}
}
2017-10-06 11:53:32 +02:00
static void fq_flow_filter ( struct fq * fq ,
struct fq_flow * flow ,
fq_skb_filter_t filter_func ,
void * filter_data ,
fq_skb_free_t free_func )
{
struct fq_tin * tin = flow - > tin ;
struct sk_buff * skb , * tmp ;
lockdep_assert_held ( & fq - > lock ) ;
skb_queue_walk_safe ( & flow - > queue , skb , tmp ) {
if ( ! filter_func ( fq , tin , flow , skb , filter_data ) )
continue ;
__skb_unlink ( skb , & flow - > queue ) ;
fq_adjust_removal ( fq , flow , skb ) ;
free_func ( fq , tin , flow , skb ) ;
}
}
/*
 * Apply fq_flow_filter() to every flow of @tin, in both the new and old
 * flow lists.  Caller must hold fq->lock.
 */
static void fq_tin_filter(struct fq *fq,
			  struct fq_tin *tin,
			  fq_skb_filter_t filter_func,
			  void *filter_data,
			  fq_skb_free_t free_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	list_for_each_entry(flow, &tin->new_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
	list_for_each_entry(flow, &tin->old_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}
2016-04-22 14:20:13 +02:00
static void fq_flow_reset ( struct fq * fq ,
struct fq_flow * flow ,
fq_skb_free_t free_func )
{
2020-12-18 19:47:15 +01:00
struct fq_tin * tin = flow - > tin ;
2016-04-22 14:20:13 +02:00
struct sk_buff * skb ;
while ( ( skb = fq_flow_dequeue ( fq , flow ) ) )
2020-12-18 19:47:15 +01:00
free_func ( fq , tin , flow , skb ) ;
2016-04-22 14:20:13 +02:00
2020-12-18 19:47:15 +01:00
if ( ! list_empty ( & flow - > flowchain ) ) {
2016-04-22 14:20:13 +02:00
list_del_init ( & flow - > flowchain ) ;
2020-12-18 19:47:15 +01:00
if ( list_empty ( & tin - > new_flows ) & &
list_empty ( & tin - > old_flows ) )
list_del_init ( & tin - > tin_list ) ;
}
2016-04-22 14:20:13 +02:00
flow - > tin = NULL ;
WARN_ON_ONCE ( flow - > backlog ) ;
}
/*
 * Reset every flow currently scheduled on @tin (new list first, then
 * old), freeing all queued skbs via @free_func.  Afterwards the tin must
 * be fully drained and unlinked.
 */
static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(!list_empty(&tin->tin_list));
	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}
static void fq_flow_init ( struct fq_flow * flow )
{
INIT_LIST_HEAD ( & flow - > flowchain ) ;
__skb_queue_head_init ( & flow - > queue ) ;
}
static void fq_tin_init ( struct fq_tin * tin )
{
INIT_LIST_HEAD ( & tin - > new_flows ) ;
INIT_LIST_HEAD ( & tin - > old_flows ) ;
2020-12-18 19:47:15 +01:00
INIT_LIST_HEAD ( & tin - > tin_list ) ;
2020-12-18 19:47:14 +01:00
fq_flow_init ( & tin - > default_flow ) ;
2016-04-22 14:20:13 +02:00
}
static int fq_init ( struct fq * fq , int flows_cnt )
{
int i ;
memset ( fq , 0 , sizeof ( fq [ 0 ] ) ) ;
spin_lock_init ( & fq - > lock ) ;
2020-12-18 19:47:15 +01:00
INIT_LIST_HEAD ( & fq - > tin_backlog ) ;
2016-04-22 14:20:13 +02:00
fq - > flows_cnt = max_t ( u32 , flows_cnt , 1 ) ;
fq - > quantum = 300 ;
fq - > limit = 8192 ;
2016-09-23 21:59:09 +02:00
fq - > memory_limit = 16 < < 20 ; /* 16 MBytes */
2016-04-22 14:20:13 +02:00
2019-11-05 16:57:50 +01:00
fq - > flows = kvcalloc ( fq - > flows_cnt , sizeof ( fq - > flows [ 0 ] ) , GFP_KERNEL ) ;
2016-04-22 14:20:13 +02:00
if ( ! fq - > flows )
return - ENOMEM ;
2022-07-09 16:37:53 +02:00
fq - > flows_bitmap = bitmap_zalloc ( fq - > flows_cnt , GFP_KERNEL ) ;
2020-12-18 19:47:15 +01:00
if ( ! fq - > flows_bitmap ) {
kvfree ( fq - > flows ) ;
fq - > flows = NULL ;
return - ENOMEM ;
}
2016-04-22 14:20:13 +02:00
for ( i = 0 ; i < fq - > flows_cnt ; i + + )
fq_flow_init ( & fq - > flows [ i ] ) ;
return 0 ;
}
/*
 * Tear down @fq: drain every flow (freeing skbs via @free_func), then
 * release the flow array and bitmap allocated by fq_init().
 */
static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int i;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kvfree(fq->flows);
	fq->flows = NULL;

	bitmap_free(fq->flows_bitmap);
	fq->flows_bitmap = NULL;
}
#endif