net: lan966x: Add support for XDP_TX
Extend lan966x XDP support with the XDP_TX action. When the XDP program returns XDP_TX for a received buffer, that buffer is moved to the TX buffers and a new RX buffer is allocated in its place. Once TX has finished with the frame, the buffer is given back to the page pool.

Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 19c6f534f6
parent 560c7223d6
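For context, the new driver path is exercised by any XDP program that returns XDP_TX, which bounces the received frame back out of its ingress port. A minimal sketch (program name, section name, and attach commands are illustrative, not part of this patch):

	/* Reflect every received frame back out the ingress port,
	 * making the driver take its XDP_TX path.
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_reflect(struct xdp_md *ctx)
	{
		return XDP_TX;
	}

	char _license[] SEC("license") = "GPL";

Built with clang -O2 -target bpf -c xdp_reflect.c -o xdp_reflect.o, such an object can be attached to a lan966x port with ip link set dev <ifname> xdp obj xdp_reflect.o sec xdp.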
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -410,12 +410,17 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
 
 		dcb_buf->used = false;
-		dma_unmap_single(lan966x->dev,
-				 dcb_buf->dma_addr,
-				 dcb_buf->len,
-				 DMA_TO_DEVICE);
-		if (!dcb_buf->ptp)
-			dev_kfree_skb_any(dcb_buf->skb);
+		if (dcb_buf->use_skb) {
+			dma_unmap_single(lan966x->dev,
+					 dcb_buf->dma_addr,
+					 dcb_buf->len,
+					 DMA_TO_DEVICE);
+
+			if (!dcb_buf->ptp)
+				napi_consume_skb(dcb_buf->data.skb, weight);
+		} else {
+			xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
+		}
 
 		clear = true;
 	}
@@ -548,6 +553,9 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 			lan966x_fdma_rx_free_page(rx);
 			lan966x_fdma_rx_advance_dcb(rx);
 			goto allocate_new;
+		case FDMA_TX:
+			lan966x_fdma_rx_advance_dcb(rx);
+			continue;
 		case FDMA_DROP:
 			lan966x_fdma_rx_free_page(rx);
 			lan966x_fdma_rx_advance_dcb(rx);
@@ -669,6 +677,62 @@ static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
 	tx->last_in_use = next_to_use;
 }
 
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+			   struct xdp_frame *xdpf,
+			   struct page *page)
+{
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_tx_dcb_buf *next_dcb_buf;
+	struct lan966x_tx *tx = &lan966x->tx;
+	dma_addr_t dma_addr;
+	int next_to_use;
+	__be32 *ifh;
+	int ret = 0;
+
+	spin_lock(&lan966x->tx_lock);
+
+	/* Get next index */
+	next_to_use = lan966x_fdma_get_next_dcb(tx);
+	if (next_to_use < 0) {
+		netif_stop_queue(port->dev);
+		ret = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	/* Generate new IFH */
+	ifh = page_address(page) + XDP_PACKET_HEADROOM;
+	memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+	lan966x_ifh_set_bypass(ifh, 1);
+	lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+	dma_addr = page_pool_get_dma_addr(page);
+	dma_sync_single_for_device(lan966x->dev, dma_addr + XDP_PACKET_HEADROOM,
+				   xdpf->len + IFH_LEN_BYTES,
+				   DMA_TO_DEVICE);
+
+	/* Setup next dcb */
+	lan966x_fdma_tx_setup_dcb(tx, next_to_use, xdpf->len + IFH_LEN_BYTES,
+				  dma_addr + XDP_PACKET_HEADROOM);
+
+	/* Fill up the buffer */
+	next_dcb_buf = &tx->dcbs_buf[next_to_use];
+	next_dcb_buf->use_skb = false;
+	next_dcb_buf->data.xdpf = xdpf;
+	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+	next_dcb_buf->dma_addr = dma_addr;
+	next_dcb_buf->used = true;
+	next_dcb_buf->ptp = false;
+	next_dcb_buf->dev = port->dev;
+
+	/* Start the transmission */
+	lan966x_fdma_tx_start(tx, next_to_use);
+
+out:
+	spin_unlock(&lan966x->tx_lock);
+
+	return ret;
+}
+
 int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 {
 	struct lan966x_port *port = netdev_priv(dev);
@@ -724,7 +788,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 
 	/* Fill up the buffer */
 	next_dcb_buf = &tx->dcbs_buf[next_to_use];
-	next_dcb_buf->skb = skb;
+	next_dcb_buf->use_skb = true;
+	next_dcb_buf->data.skb = skb;
 	next_dcb_buf->len = skb->len;
 	next_dcb_buf->dma_addr = dma_addr;
 	next_dcb_buf->used = true;
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -302,13 +302,13 @@ err:
 	return NETDEV_TX_BUSY;
 }
 
-static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
 {
 	packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
 		IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
 }
 
-static void lan966x_ifh_set_port(void *ifh, u64 bypass)
+void lan966x_ifh_set_port(void *ifh, u64 bypass)
 {
 	packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
 		IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -105,11 +105,13 @@ enum macaccess_entry_type {
  * FDMA_PASS, frame is valid and can be used
  * FDMA_ERROR, something went wrong, stop getting more frames
  * FDMA_DROP, frame is dropped, but continue to get more frames
+ * FDMA_TX, frame is given to TX, but continue to get more frames
  */
 enum lan966x_fdma_action {
 	FDMA_PASS = 0,
 	FDMA_ERROR,
 	FDMA_DROP,
+	FDMA_TX,
 };
 
 struct lan966x_port;
@@ -175,10 +177,14 @@ struct lan966x_rx {
 struct lan966x_tx_dcb_buf {
 	dma_addr_t dma_addr;
 	struct net_device *dev;
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	} data;
 	u32 len;
 	u32 used : 1;
 	u32 ptp : 1;
+	u32 use_skb : 1;
 };
 
 struct lan966x_tx {
@@ -360,6 +366,8 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
 
 void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
 void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
+void lan966x_ifh_set_port(void *ifh, u64 bypass);
 
 void lan966x_stats_get(struct net_device *dev,
 		       struct rtnl_link_stats64 *stats);
@@ -460,6 +468,9 @@ u32 lan966x_ptp_get_period_ps(void);
 int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
 
 int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+			   struct xdp_frame *frame,
+			   struct page *page);
 int lan966x_fdma_change_mtu(struct lan966x *lan966x);
 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
 void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
@@ -54,6 +54,7 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
 {
 	struct bpf_prog *xdp_prog = port->xdp_prog;
 	struct lan966x *lan966x = port->lan966x;
+	struct xdp_frame *xdpf;
 	struct xdp_buff xdp;
 	u32 act;
 
@@ -66,6 +67,13 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
 	switch (act) {
 	case XDP_PASS:
 		return FDMA_PASS;
+	case XDP_TX:
+		xdpf = xdp_convert_buff_to_frame(&xdp);
+		if (!xdpf)
+			return FDMA_DROP;
+
+		return lan966x_fdma_xmit_xdpf(port, xdpf, page) ?
+		       FDMA_DROP : FDMA_TX;
 	default:
 		bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
 		fallthrough;