// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014      Protonic Holland,
 *                         David Jander
 * Copyright (C) 2014-2017 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into
 * an overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If queue is full drop frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (unlikely(IS_ERR(skb))) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}
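
/* A minimal sketch of how a driver's ->mailbox_read() callback might satisfy
 * the contract documented above: return NULL for an empty mailbox, ERR_PTR()
 * on error (including the "drop" case, where the mailbox is freed but the
 * frame is discarded), or the filled skb. struct foo_priv and the foo_*()
 * hardware accessors are hypothetical placeholders, not part of this API.
 *
 *	static struct sk_buff *
 *	foo_mailbox_read(struct can_rx_offload *offload, unsigned int n,
 *			 u32 *timestamp, bool drop)
 *	{
 *		struct foo_priv *priv = foo_offload_to_priv(offload);
 *		struct can_frame *cf;
 *		struct sk_buff *skb;
 *
 *		if (!foo_mb_has_data(priv, n))
 *			return NULL;
 *
 *		if (drop) {
 *			foo_mark_mb_free(priv, n);
 *			return ERR_PTR(-ENOBUFS);
 *		}
 *
 *		skb = alloc_can_skb(offload->dev, &cf);
 *		if (!skb) {
 *			foo_mark_mb_free(priv, n);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *
 *		foo_read_mb(priv, n, cf, timestamp);
 *		foo_mark_mb_free(priv, n);
 *
 *		return skb;
 *	}
 */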

int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > offload->skb_queue_len_max / 8)
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
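
/* Sketch of how a driver's TX-complete path might use
 * can_rx_offload_get_echo_skb() to loop the echoed frame back in timestamp
 * order; "priv", "mb" and "hw_timestamp" are illustrative names only.
 *
 *	stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, mb,
 *							hw_timestamp);
 *	stats->tx_packets++;
 */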

int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
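
/* Typical timestamp-mode usage, sketched with hypothetical driver names
 * (priv, foo_mailbox_read, FOO_RX_MB_*, foo_read_pending_mbs): fill in
 * mb_first, mb_last and mailbox_read before registering, enable the NAPI
 * context when the interface comes up, and pass the pending-mailbox bitmask
 * from the IRQ handler.
 *
 *	priv->offload.mailbox_read = foo_mailbox_read;
 *	priv->offload.mb_first = FOO_RX_MB_FIRST;
 *	priv->offload.mb_last = FOO_RX_MB_LAST;
 *	err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *	if (err)
 *		return err;
 *
 *	can_rx_offload_enable(&priv->offload);	// e.g. when opening the device
 *
 *	// in the IRQ handler:
 *	u64 pending = foo_read_pending_mbs(priv);
 *	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
 */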

int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
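
/* FIFO mode only needs mailbox_read (mailbox 0 is read until empty) and a
 * NAPI weight; again sketched with hypothetical driver names.
 *
 *	priv->offload.mailbox_read = foo_mailbox_read;
 *	err = can_rx_offload_add_fifo(dev, &priv->offload, FOO_NAPI_WEIGHT);
 *
 *	// in the IRQ handler:
 *	can_rx_offload_irq_offload_fifo(&priv->offload);
 */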

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);