can: c_can: support tx ring algorithm
The algorithm is already used successfully by other CAN drivers (e.g.
mcp251xfd). Its implementation was kindly suggested to me by Marc
Kleine-Budde following a patch I had previously submitted. You can find
every detail at https://lore.kernel.org/patchwork/patch/1422929/.

The idea is that after this patch, it will be easier to patch the
driver to use the message object memory as a true FIFO.

Link: https://lore.kernel.org/r/20210807130800.5246-4-dariobin@libero.it
Suggested-by: Marc Kleine-Budde <mkl@pengutronix.de>
Signed-off-by: Dario Binacchi <dariobin@libero.it>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
commit 28e86e9ab5
parent a54cdbba9d
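The ring algorithm borrowed from mcp251xfd keeps two free-running counters, head (frames queued) and tail (frames completed), and masks them down to buffer indices. As a rough stand-alone sketch of that idea (illustration only, with invented names and values, not code from either driver):

#include <assert.h>

/* Free-running counters; only the low bits select a buffer. */
struct ring {
	unsigned int head;	/* producer: frames queued */
	unsigned int tail;	/* consumer: frames completed */
	unsigned int obj_num;	/* buffer count, must be a power of two */
};

static unsigned int ring_index(unsigned int counter, unsigned int obj_num)
{
	return counter & (obj_num - 1);	/* cheap modulo for 2^n sizes */
}

static unsigned int ring_pending(const struct ring *r)
{
	return r->head - r->tail;	/* wrap-safe for unsigned counters */
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .obj_num = 16 };

	r.head += 3;				/* queue three frames */
	assert(ring_pending(&r) == 3);
	r.tail += 3;				/* completion irq reaps them */
	assert(ring_pending(&r) == 0);
	assert(ring_index(17, 16) == 1);	/* counters run free, indices wrap */
	return 0;
}

The counters only ever increase; since both are unsigned, head - tail stays correct across wrap-around, which is why neither side ever needs to reset them outside of chip (re)configuration.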
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -176,6 +176,13 @@ struct c_can_raminit {
 	bool needs_pulse;
 };
 
+/* c_can tx ring structure */
+struct c_can_tx_ring {
+	unsigned int head;
+	unsigned int tail;
+	unsigned int obj_num;
+};
+
 /* c_can private data structure */
 struct c_can_priv {
 	struct can_priv can;	/* must be the first member */
@@ -190,10 +197,10 @@ struct c_can_priv {
 	unsigned int msg_obj_tx_first;
 	unsigned int msg_obj_tx_last;
 	u32 msg_obj_rx_mask;
-	atomic_t tx_active;
 	atomic_t sie_pending;
 	unsigned long tx_dir;
 	int last_status;
+	struct c_can_tx_ring tx;
 	u16 (*read_reg)(const struct c_can_priv *priv, enum reg index);
 	void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val);
 	u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index);
@@ -219,4 +226,28 @@ int c_can_power_down(struct net_device *dev);
 
 void c_can_set_ethtool_ops(struct net_device *dev);
 
+static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
+{
+	return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
+{
+	return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+{
+	u8 head = c_can_get_tx_head(ring);
+	u8 tail = c_can_get_tx_tail(ring);
+
+	/* This is not a FIFO. C/D_CAN sends out the buffers
+	 * prioritized. The lowest buffer number wins.
+	 */
+	if (head < tail)
+		return 0;
+
+	return ring->obj_num - head;
+}
+
 #endif /* C_CAN_H */
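To see what these helpers compute, the harness below copies their logic into a user-space test (obj_num = 16 is just an example value; the driver derives it from msg_obj_tx_num). The point of the head < tail check: the hardware always transmits the lowest-numbered pending buffer first, so the driver must not wrap around and hand out low buffer numbers while higher ones are still pending; tx_free therefore counts only from the masked head to the end of the ring.

#include <assert.h>

struct tx_ring {
	unsigned int head;
	unsigned int tail;
	unsigned int obj_num;
};

/* User-space copies of the inline helpers above. */
static unsigned char get_head(const struct tx_ring *r)
{
	return r->head & (r->obj_num - 1);
}

static unsigned char get_tail(const struct tx_ring *r)
{
	return r->tail & (r->obj_num - 1);
}

static unsigned char get_free(const struct tx_ring *r)
{
	unsigned char head = get_head(r);
	unsigned char tail = get_tail(r);

	if (head < tail)
		return 0;
	return r->obj_num - head;
}

int main(void)
{
	struct tx_ring r = { .head = 0, .tail = 0, .obj_num = 16 };

	assert(get_free(&r) == 16);	/* empty ring */

	r.head = 3;			/* three frames queued... */
	r.tail = 2;			/* ...two of them completed */
	assert(get_free(&r) == 13);	/* free space counted from head only */

	r.head = 16;			/* head wrapped to index 0... */
	r.tail = 15;			/* ...while buffer 15 is still pending */
	assert(get_free(&r) == 0);	/* refuse to reuse the low buffers */
	return 0;
}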
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -427,24 +427,50 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
 }
 
+static bool c_can_tx_busy(const struct c_can_priv *priv,
+			  const struct c_can_tx_ring *tx_ring)
+{
+	if (c_can_get_tx_free(tx_ring) > 0)
+		return false;
+
+	netif_stop_queue(priv->dev);
+
+	/* Memory barrier before checking tx_free (head and tail) */
+	smp_mb();
+
+	if (c_can_get_tx_free(tx_ring) == 0) {
+		netdev_dbg(priv->dev,
+			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+			   tx_ring->head, tx_ring->tail,
+			   tx_ring->head - tx_ring->tail);
+		return true;
+	}
+
+	netif_start_queue(priv->dev);
+	return false;
+}
+
 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 				    struct net_device *dev)
 {
 	struct can_frame *frame = (struct can_frame *)skb->data;
 	struct c_can_priv *priv = netdev_priv(dev);
+	struct c_can_tx_ring *tx_ring = &priv->tx;
 	u32 idx, obj;
 
 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
-	/* This is not a FIFO. C/D_CAN sends out the buffers
-	 * prioritized. The lowest buffer number wins.
-	 */
-	idx = fls(atomic_read(&priv->tx_active));
+
+	if (c_can_tx_busy(priv, tx_ring))
+		return NETDEV_TX_BUSY;
+
+	idx = c_can_get_tx_head(tx_ring);
+	tx_ring->head++;
+	if (c_can_get_tx_free(tx_ring) == 0)
+		netif_stop_queue(dev);
+
 	obj = idx + priv->msg_obj_tx_first;
 
-	/* If this is the last buffer, stop the xmit queue */
-	if (idx == priv->msg_obj_tx_num - 1)
-		netif_stop_queue(dev);
 	/* Store the message in the interface so we can call
 	 * can_put_echo_skb(). We must do this before we enable
 	 * transmit as we might race against do_tx().
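The stop/recheck sequence in c_can_tx_busy() avoids a lost wakeup: if the queue were stopped after a plain full-check, a completion interrupt could free space in between and its netif_wake_queue() would hit a queue that was not yet stopped. Stopping first and then re-reading tx_free, with smp_mb() ordering the two, closes that window. A condensed sequential model of just this decision logic, with invented names and no real concurrency:

#include <stdbool.h>
#include <stdio.h>

static unsigned int head, tail, obj_num = 16;
static bool stopped;

/* Same arithmetic as c_can_get_tx_free(), on globals for brevity. */
static unsigned int tx_free(void)
{
	unsigned int h = head & (obj_num - 1);
	unsigned int t = tail & (obj_num - 1);

	return h < t ? 0 : obj_num - h;
}

/* Model of c_can_tx_busy(): stop first, then re-check. */
static bool tx_busy(void)
{
	if (tx_free() > 0)
		return false;

	stopped = true;			/* netif_stop_queue() */
	/* driver: smp_mb() — the stop must be ordered before re-reading
	 * tail; pairs with the barrier in the completion path
	 */
	if (tx_free() == 0)
		return true;		/* really full, stay stopped */

	stopped = false;		/* netif_start_queue(): irq freed space */
	return false;
}

int main(void)
{
	bool busy;

	head = 16;	/* masked head 0, masked tail 12: ring full */
	tail = 12;
	busy = tx_busy();
	printf("busy=%d stopped=%d\n", busy, stopped);	/* busy=1 stopped=1 */

	tail = 16;	/* completions landed, do_tx() woke the queue */
	stopped = false;
	busy = tx_busy();
	printf("busy=%d\n", busy);	/* busy=0: transmission may proceed */
	return 0;
}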
@@ -453,8 +479,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 	priv->dlc[idx] = frame->len;
 	can_put_echo_skb(skb, dev, idx, 0);
 
-	/* Update the active bits */
-	atomic_add(BIT(idx), &priv->tx_active);
 	/* Start transmission */
 	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
 
@@ -567,6 +591,7 @@ static int c_can_software_reset(struct net_device *dev)
 static int c_can_chip_config(struct net_device *dev)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
+	struct c_can_tx_ring *tx_ring = &priv->tx;
 	int err;
 
 	err = c_can_software_reset(dev);
@@ -598,7 +623,8 @@ static int c_can_chip_config(struct net_device *dev)
 	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
 	/* Clear all internal status */
-	atomic_set(&priv->tx_active, 0);
+	tx_ring->head = 0;
+	tx_ring->tail = 0;
 	priv->tx_dir = 0;
 
 	/* set bittiming params */
@@ -696,14 +722,14 @@ static int c_can_get_berr_counter(const struct net_device *dev,
 static void c_can_do_tx(struct net_device *dev)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
+	struct c_can_tx_ring *tx_ring = &priv->tx;
 	struct net_device_stats *stats = &dev->stats;
-	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
+	u32 idx, obj, pkts = 0, bytes = 0, pend;
 
 	if (priv->msg_obj_tx_last > 32)
 		pend = priv->read_reg32(priv, C_CAN_INTPND3_REG);
 	else
 		pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
-	clr = pend;
 
 	while ((idx = ffs(pend))) {
 		idx--;
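The completion handler walks the interrupt-pending bitmask with ffs(), reaping the lowest-numbered finished buffer each round. A stand-alone rendering of that walk, with an invented mask value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int pend = 0x13;	/* invented: objects 0, 1 and 4 done */
	unsigned int idx, pkts = 0;

	while ((idx = ffs(pend))) {
		idx--;			/* ffs() returns 1-based positions */
		pend &= ~(1U << idx);	/* clear the bit we just handled */
		printf("reap message object index %u\n", idx);
		pkts++;
	}
	printf("tail advances by %u\n", pkts);
	return 0;
}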
@@ -723,11 +749,14 @@ static void c_can_do_tx(struct net_device *dev)
 	if (!pkts)
 		return;
 
-	/* Clear the bits in the tx_active mask */
-	atomic_sub(clr, &priv->tx_active);
-
-	if (clr & BIT(priv->msg_obj_tx_num - 1))
-		netif_wake_queue(dev);
+	tx_ring->tail += pkts;
+	if (c_can_get_tx_free(tx_ring)) {
+		/* Make sure that anybody stopping the queue after
+		 * this sees the new tx_ring->tail.
+		 */
+		smp_mb();
+		netif_wake_queue(priv->dev);
+	}
 
 	stats->tx_bytes += bytes;
 	stats->tx_packets += pkts;
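This is the other half of the barrier pairing: the new tail must be published before the wakeup. If the xmit path stops the queue after this point, its barrier-ordered re-read of tx_free sees the new tail and restarts the queue itself; if it stopped earlier, the wakeup here restarts it. Continuing the user-space sketch from the c_can_start_xmit() hunk (globals repeated so the snippet stands alone):

#include <stdbool.h>

static unsigned int head, tail, obj_num = 16;
static bool stopped;

static unsigned int tx_free(void)
{
	unsigned int h = head & (obj_num - 1);
	unsigned int t = tail & (obj_num - 1);

	return h < t ? 0 : obj_num - h;
}

/* Model of the tail update in c_can_do_tx(). */
static void do_tx_complete(unsigned int pkts)
{
	tail += pkts;			/* tx_ring->tail += pkts */
	/* driver: smp_mb() — publish the new tail before waking the
	 * queue; pairs with the barrier in c_can_tx_busy()
	 */
	if (tx_free() > 0)
		stopped = false;	/* netif_wake_queue() */
}

int main(void)
{
	head = 16;		/* full: masked head 0 < masked tail 12 */
	tail = 12;
	stopped = true;		/* the xmit path stopped the queue */
	do_tx_complete(4);	/* reap the remaining four frames */
	return stopped;		/* exits 0: the queue was woken */
}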
@@ -1208,6 +1237,10 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
 	priv->msg_obj_tx_last =
 		priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1;
 
+	priv->tx.head = 0;
+	priv->tx.tail = 0;
+	priv->tx.obj_num = msg_obj_tx_num;
+
 	netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
 
 	priv->dev = dev;
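One assumption the initialization leaves implicit: the masking in c_can_get_tx_head()/_tail() is only a valid modulo if obj_num is a power of two. That holds here, since msg_obj_tx_num is half of the 32 or 64 message objects supported by the driver, i.e. 16 or 32. A guard along these lines (hypothetical, not part of the patch; in-kernel code would use is_power_of_2() from <linux/log2.h>) would make it explicit:

#include <assert.h>

/* Hypothetical guard, not in the patch: (x & (obj_num - 1)) is only
 * equivalent to (x % obj_num) when obj_num is a power of two.
 */
static int is_pow2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int msg_obj_tx_num = 32 / 2;	/* 32 objects -> 16 for tx */

	assert(is_pow2(msg_obj_tx_num));
	assert(is_pow2(64 / 2));		/* 64-object variant */
	return 0;
}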