Merge branch 'add-ethtool-ntuple-filters-support'
Naveen Mamindlapalli says:

====================
Add ethtool ntuple filters support

This patch series adds support for ethtool ntuple filters, unicast address
filtering, VLAN offload and SR-IOV ndo handlers. All of the above features
are based on the Admin Function (AF) driver support to install and delete
the low-level MCAM entries. Each MCAM entry is programmed with the packet
fields to match and the actions to take if the match succeeds. The PF driver
requests the AF driver to allocate a set of MCAM entries to be used to
install the flows by that PF. The entries are freed when the PF driver is
unloaded.

* Patches 1 to 4 add AF driver infrastructure to install and delete the
  low-level MCAM flow entries.
* Patch 5 adds ethtool ntuple filter support.
* Patch 6 adds unicast MAC address filtering.
* Patch 7 adds support for dumping the MCAM entries via debugfs.
* Patches 8 to 10 add support for VLAN offload.
* Patches 10 and 11 add support for SR-IOV ndo handlers.
* Patch 12 adds support to read the MCAM entries.

Misc:
* Removed the redundant NIX_RXVLAN_ALLOC mailbox message.
====================

Link: https://lore.kernel.org/r/20201114195303.25967-1-naveenm@marvell.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 5cfa9a6128
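For orientation, the sketch below is a hypothetical fragment, not part of the
series: it only illustrates how a PF driver might fill the new NPC_INSTALL_FLOW
mailbox request added by this diff. The function name example_fill_dmac_flow is
made up, header paths are assumed to follow this driver's tree, and the actual
mailbox send is driver plumbing that is not shown. The fields to match go into
packet/mask, the NPC_* feature bits advertise which fields are valid, and op
selects the RX action the AF programs into the MCAM entry.

	/* Hypothetical example only -- mirrors the npc_install_flow_req
	 * layout added in this series; the mailbox send itself is not shown.
	 */
	#include <linux/bitops.h>
	#include <linux/etherdevice.h>
	#include "mbox.h"
	#include "npc.h"

	static void example_fill_dmac_flow(struct npc_install_flow_req *req,
					   const u8 *dmac, u16 entry, u16 channel)
	{
		memset(req, 0, sizeof(*req));

		/* Match on destination MAC only: value, all-ones mask, and
		 * the NPC_DMAC feature bit so the AF knows which field to
		 * program into the MCAM key.
		 */
		ether_addr_copy(req->packet.dmac, dmac);
		eth_broadcast_addr(req->mask.dmac);
		req->features = BIT_ULL(NPC_DMAC);

		/* MCAM entry (allocated earlier via NPC_MCAM_ALLOC_ENTRY),
		 * ingress channel to match, RX interface and the action to
		 * take on a hit.
		 */
		req->entry = entry;
		req->channel = channel;
		req->intf = NIX_INTF_RX;
		req->op = NIX_RX_ACTIONOP_UCAST;
	}

A matching NPC_DELETE_FLOW request carrying the same entry index removes the
rule again.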
@@ -9,4 +9,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o

octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o
@@ -162,6 +162,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT (0xfull)

/* NIX TX action operation*/
#define NIX_TX_ACTIONOP_DROP (0x0ull)
@@ -188,10 +188,19 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
npc_install_flow_req, npc_install_flow_rsp) \
M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
npc_delete_flow_req, msg_rsp) \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
@@ -200,7 +209,8 @@ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
@@ -216,7 +226,6 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
@@ -473,6 +482,20 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
NIX_AF_ERR_TX_VTAG_NOSPC = -421,
NIX_AF_ERR_RX_VTAG_INUSE = -422,
};

/* For NIX RX vtag action */
enum nix_rx_vtag0_type {
NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
NIX_AF_LFX_RX_VTAG_TYPE1,
NIX_AF_LFX_RX_VTAG_TYPE2,
NIX_AF_LFX_RX_VTAG_TYPE3,
NIX_AF_LFX_RX_VTAG_TYPE4,
NIX_AF_LFX_RX_VTAG_TYPE5,
NIX_AF_LFX_RX_VTAG_TYPE6,
NIX_AF_LFX_RX_VTAG_TYPE7,
};

/* For NIX LF context alloc and init */
@@ -510,6 +533,13 @@ struct nix_lf_alloc_rsp {
u8 sdp_links; /* No. of SDP links present in HW */
};

struct nix_lf_free_req {
struct mbox_msghdr hdr;
#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
u64 flags;
};

/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
@@ -600,14 +630,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
/* tx vlan0 tag(C-VLAN) */
u64 vlan0;
/* tx vlan1 tag(S-VLAN) */
u64 vlan1;
/* insert tx vlan tag */
u8 insert_vlan :1;
/* insert tx double vlan tag */
u8 double_vlan :1;
u64 vtag0;
u64 vtag1;

/* cfg_vtag0 & cfg_vtag1 fields are valid
 * when free_vtag0 & free_vtag1 are '0's.
 */
/* cfg_vtag0 = 1 to configure vtag0 */
u8 cfg_vtag0 :1;
/* cfg_vtag1 = 1 to configure vtag1 */
u8 cfg_vtag1 :1;

/* vtag0_idx & vtag1_idx are only valid when
 * both cfg_vtag0 & cfg_vtag1 are '0's,
 * these fields are used along with free_vtag0
 * & free_vtag1 to free the nix lf's tx_vlan
 * configuration.
 *
 * Denotes the indices of tx_vtag def registers
 * that needs to be cleared and freed.
 */
int vtag0_idx;
int vtag1_idx;

/* free_vtag0 & free_vtag1 fields are valid
 * when cfg_vtag0 & cfg_vtag1 are '0's.
 */
/* free_vtag0 = 1 clears vtag0 configuration
 * vtag0_idx denotes the index to be cleared.
 */
u8 free_vtag0 :1;
/* free_vtag1 = 1 clears vtag1 configuration
 * vtag1_idx denotes the index to be cleared.
 */
u8 free_vtag1 :1;
} tx;

/* valid when cfg_type is '1' */
@@ -622,6 +678,17 @@ struct nix_vtag_config {
};
};

struct nix_vtag_config_rsp {
struct mbox_msghdr hdr;
int vtag0_idx;
int vtag1_idx;
/* Indices of tx_vtag def registers used to configure
 * tx vtag0 & vtag1 headers, these indices are valid
 * when nix_vtag_config mbox requested for vtag0 and/
 * or vtag1 configuration.
 */
};

struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@@ -882,6 +949,87 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};

struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
__be16 etype;
__be16 vlan_etype;
__be16 vlan_tci;
union {
__be32 ip4src;
__be32 ip6src[4];
};
union {
__be32 ip4dst;
__be32 ip6dst[4];
};
u8 tos;
u8 ip_ver;
u8 ip_proto;
u8 tc;
__be16 sport;
__be16 dport;
};

struct npc_install_flow_req {
struct mbox_msghdr hdr;
struct flow_msg packet;
struct flow_msg mask;
u64 features;
u16 entry;
u16 channel;
u8 intf;
u8 set_cntr; /* If counter is available set counter for this entry ? */
u8 default_rule;
u8 append; /* overwrite(0) or append(1) flow to default rule? */
u16 vf;
/* action */
u32 index;
u16 match_id;
u8 flow_key_alg;
u8 op;
/* vtag rx action */
u8 vtag0_type;
u8 vtag0_valid;
u8 vtag1_type;
u8 vtag1_valid;
/* vtag tx action */
u16 vtag0_def;
u8 vtag0_op;
u16 vtag1_def;
u8 vtag1_op;
};

struct npc_install_flow_rsp {
struct mbox_msghdr hdr;
int counter; /* negative if no counter else counter number */
};

struct npc_delete_flow_req {
struct mbox_msghdr hdr;
u16 entry;
u16 start;/*Disable range of entries */
u16 end;
u8 all; /* PF + VFs */
};

struct npc_mcam_read_entry_req {
struct mbox_msghdr hdr;
u16 entry; /* MCAM entry to read */
};

struct npc_mcam_read_entry_rsp {
struct mbox_msghdr hdr;
struct mcam_entry entry_data;
u8 intf;
u8 enable;
};

struct npc_mcam_read_base_rule_rsp {
struct mbox_msghdr hdr;
struct mcam_entry entry;
};

enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
@@ -140,6 +140,63 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};

/* NPC port kind defines how the incoming or outgoing packets
 * are processed. NPC accepts packets from up to 64 pkinds.
 * Software assigns pkind for each incoming port such as CGX
 * Ethernet interfaces, LBK interfaces, etc.
 */
enum npc_pkind_type {
NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
};

/* list of known and supported fields in packet header and
 * fields present in key structure.
 */
enum key_fields {
NPC_DMAC,
NPC_SMAC,
NPC_ETYPE,
NPC_OUTER_VID,
NPC_TOS,
NPC_SIP_IPV4,
NPC_DIP_IPV4,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
NPC_DPORT_UDP,
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
NPC_ERRLEV,
NPC_ERRCODE,
NPC_LXMB,
NPC_LA,
NPC_LB,
NPC_LC,
NPC_LD,
NPC_LE,
NPC_LF,
NPC_LG,
NPC_LH,
/* Ethertype for untagged frame */
NPC_ETYPE_ETHER,
/* Ethertype for single tagged frame */
NPC_ETYPE_TAG1,
/* Ethertype for double tagged frame */
NPC_ETYPE_TAG2,
/* outer vlan tci for single tagged frame */
NPC_VLAN_TAG1,
/* outer vlan tci for double tagged frame */
NPC_VLAN_TAG2,
/* other header fields programmed to extract but not of our interest */
NPC_UNKNOWN,
NPC_KEY_FIELDS_MAX,
};

struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
@@ -300,11 +357,63 @@ struct nix_rx_action {
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)

/* NPC_PARSE_KEX_S nibble definitions for each field */
#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)

struct nix_tx_action {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd_63_48 :16;
u64 match_id :16;
u64 index :20;
u64 rsvd_11_8 :8;
u64 op :4;
#else
u64 op :4;
u64 rsvd_11_8 :8;
u64 index :20;
u64 match_id :16;
u64 rsvd_63_48 :16;
#endif
};

/* NIX Receive Vtag Action Structure */
#define VTAG0_VALID_BIT BIT_ULL(15)
#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
#define RX_VTAG0_VALID_BIT BIT_ULL(15)
#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
#define RX_VTAG1_VALID_BIT BIT_ULL(47)
#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)

/* NIX Transmit Vtag Action Structure */
#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)

struct npc_mcam_kex {
/* MKEX Profle Header */
@@ -357,4 +466,24 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_iip4;
};

struct rvu_npc_mcam_rule {
struct flow_msg packet;
struct flow_msg mask;
u8 intf;
union {
struct nix_tx_action tx_action;
struct nix_rx_action rx_action;
};
u64 vtag_action;
struct list_head list;
u64 features;
u16 owner;
u16 entry;
u16 cntr;
bool has_cntr;
u8 default_rule;
bool enable;
bool vfvlan_cfg;
};

#endif /* NPC_H */
@ -148,6 +148,20 @@
|
||||
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
|
||||
((flags_ena) << 6) | ((key_ofs) & 0x3F))
|
||||
|
||||
/* Rx parse key extract nibble enable */
|
||||
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
|
||||
NPC_PARSE_NIBBLE_LA_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LB_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LC_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LD_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LE_LTYPE)
|
||||
/* Tx parse key extract nibble enable */
|
||||
#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LB_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LC_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LD_LTYPE | \
|
||||
NPC_PARSE_NIBBLE_LE_LTYPE)
|
||||
|
||||
enum npc_kpu_parser_state {
|
||||
NPC_S_NA = 0,
|
||||
NPC_S_KPU1_ETHER,
|
||||
@ -13385,9 +13399,10 @@ static struct npc_mcam_kex npc_mkex_default = {
|
||||
.name = "default",
|
||||
.kpu_version = NPC_KPU_PROFILE_VER,
|
||||
.keyx_cfg = {
|
||||
/* nibble: LA..LE (ltype only) + Channel */
|
||||
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
|
||||
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
|
||||
/* nibble: LA..LE (ltype only) + channel */
|
||||
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
|
||||
/* nibble: LA..LE (ltype only) */
|
||||
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
|
||||
},
|
||||
.intf_lid_lt_ld = {
|
||||
/* Default RX MCAM KEX profile */
|
||||
@ -13405,12 +13420,14 @@ static struct npc_mcam_kex npc_mkex_default = {
|
||||
/* Layer B: Single VLAN (CTAG) */
|
||||
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
|
||||
[NPC_LT_LB_CTAG] = {
|
||||
KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
|
||||
KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
|
||||
},
|
||||
/* Layer B: Stacked VLAN (STAG|QinQ) */
|
||||
[NPC_LT_LB_STAG_QINQ] = {
|
||||
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
|
||||
KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
|
||||
/* Outer VLAN: 2 bytes, KW0[63:48] */
|
||||
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
|
||||
/* Ethertype: 2 bytes, KW0[47:32] */
|
||||
KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
|
||||
},
|
||||
[NPC_LT_LB_FDSA] = {
|
||||
/* SWITCH PORT: 1 byte, KW0[63:48] */
|
||||
@ -13436,17 +13453,71 @@ static struct npc_mcam_kex npc_mkex_default = {
|
||||
[NPC_LID_LD] = {
|
||||
/* Layer D:UDP */
|
||||
[NPC_LT_LD_UDP] = {
|
||||
/* SPORT: 2 bytes, KW3[15:0] */
|
||||
KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
|
||||
/* DPORT: 2 bytes, KW3[31:16] */
|
||||
KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
|
||||
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
|
||||
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
|
||||
},
|
||||
/* Layer D:TCP */
|
||||
[NPC_LT_LD_TCP] = {
|
||||
/* SPORT: 2 bytes, KW3[15:0] */
|
||||
KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
|
||||
/* DPORT: 2 bytes, KW3[31:16] */
|
||||
KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
|
||||
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
|
||||
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
/* Default TX MCAM KEX profile */
|
||||
[NIX_INTF_TX] = {
|
||||
[NPC_LID_LA] = {
|
||||
/* Layer A: NIX_INST_HDR_S + Ethernet */
|
||||
/* NIX appends 8 bytes of NIX_INST_HDR_S at the
|
||||
* start of each TX packet supplied to NPC.
|
||||
*/
|
||||
[NPC_LT_LA_IH_NIX_ETHER] = {
|
||||
/* PF_FUNC: 2B , KW0 [47:32] */
|
||||
KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
|
||||
/* DMAC: 6 bytes, KW1[63:16] */
|
||||
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
|
||||
},
|
||||
},
|
||||
[NPC_LID_LB] = {
|
||||
/* Layer B: Single VLAN (CTAG) */
|
||||
[NPC_LT_LB_CTAG] = {
|
||||
/* CTAG VLAN[2..3] KW0[63:48] */
|
||||
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
|
||||
/* CTAG VLAN[2..3] KW1[15:0] */
|
||||
KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
|
||||
},
|
||||
/* Layer B: Stacked VLAN (STAG|QinQ) */
|
||||
[NPC_LT_LB_STAG_QINQ] = {
|
||||
/* Outer VLAN: 2 bytes, KW0[63:48] */
|
||||
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
|
||||
/* Outer VLAN: 2 Bytes, KW1[15:0] */
|
||||
KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
|
||||
},
|
||||
},
|
||||
[NPC_LID_LC] = {
|
||||
/* Layer C: IPv4 */
|
||||
[NPC_LT_LC_IP] = {
|
||||
/* SIP+DIP: 8 bytes, KW2[63:0] */
|
||||
KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
|
||||
/* TOS: 1 byte, KW1[63:56] */
|
||||
KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
|
||||
},
|
||||
/* Layer C: IPv6 */
|
||||
[NPC_LT_LC_IP6] = {
|
||||
/* Everything up to SADDR: 8 bytes, KW2[63:0] */
|
||||
KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
|
||||
},
|
||||
},
|
||||
[NPC_LID_LD] = {
|
||||
/* Layer D:UDP */
|
||||
[NPC_LT_LD_UDP] = {
|
||||
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
|
||||
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
|
||||
},
|
||||
/* Layer D:TCP */
|
||||
[NPC_LT_LD_TCP] = {
|
||||
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
|
||||
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -727,6 +727,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
|
||||
u64 *mac;
|
||||
|
||||
for (pf = 0; pf < hw->total_pfs; pf++) {
|
||||
/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
|
||||
if (!pf)
|
||||
goto lbkvf;
|
||||
|
||||
if (!is_pf_cgxmapped(rvu, pf))
|
||||
continue;
|
||||
/* Assign MAC address to PF */
|
||||
@ -740,8 +744,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
|
||||
} else {
|
||||
eth_random_addr(pfvf->mac_addr);
|
||||
}
|
||||
ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
|
||||
|
||||
/* Assign MAC address to VFs */
|
||||
lbkvf:
|
||||
/* Assign MAC address to VFs*/
|
||||
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
|
||||
for (vf = 0; vf < numvfs; vf++, hwvf++) {
|
||||
pfvf = &rvu->hwvf[hwvf];
|
||||
@ -754,6 +760,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
|
||||
} else {
|
||||
eth_random_addr(pfvf->mac_addr);
|
||||
}
|
||||
ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1176,6 +1183,9 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
|
||||
if (blkaddr < 0)
|
||||
return;
|
||||
|
||||
if (blktype == BLKTYPE_NIX)
|
||||
rvu_nix_reset_mac(pfvf, pcifunc);
|
||||
|
||||
block = &hw->block[blkaddr];
|
||||
|
||||
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
|
||||
@ -2642,7 +2652,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
|
||||
|
||||
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
|
||||
|
||||
static int lbk_get_num_chans(void)
|
||||
int rvu_get_num_lbk_chans(void)
|
||||
{
|
||||
struct pci_dev *pdev;
|
||||
void __iomem *base;
|
||||
@ -2677,7 +2687,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
chans = lbk_get_num_chans();
|
||||
chans = rvu_get_num_lbk_chans();
|
||||
if (chans < 0)
|
||||
return chans;
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "rvu_struct.h"
|
||||
#include "common.h"
|
||||
#include "mbox.h"
|
||||
#include "npc.h"
|
||||
|
||||
/* PCI device IDs */
|
||||
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
|
||||
@ -105,6 +106,36 @@ struct nix_mce_list {
|
||||
int max;
|
||||
};
|
||||
|
||||
/* layer metadata to uniquely identify a packet header field */
|
||||
struct npc_layer_mdata {
|
||||
u8 lid;
|
||||
u8 ltype;
|
||||
u8 hdr;
|
||||
u8 key;
|
||||
u8 len;
|
||||
};
|
||||
|
||||
/* Structure to represent a field present in the
|
||||
* generated key. A key field may present anywhere and can
|
||||
* be of any size in the generated key. Once this structure
|
||||
* is populated for fields of interest then field's presence
|
||||
* and location (if present) can be known.
|
||||
*/
|
||||
struct npc_key_field {
|
||||
/* Masks where all set bits indicate position
|
||||
* of a field in the key
|
||||
*/
|
||||
u64 kw_mask[NPC_MAX_KWS_IN_KEY];
|
||||
/* Number of words in the key a field spans. If a field is
|
||||
* of 16 bytes and key offset is 4 then the field will use
|
||||
* 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
|
||||
* nr_kws will be 3(KW0, KW1 and KW2).
|
||||
*/
|
||||
int nr_kws;
|
||||
/* used by packet header fields */
|
||||
struct npc_layer_mdata layer_mdata;
|
||||
};
|
||||
|
||||
struct npc_mcam {
|
||||
struct rsrc_bmap counters;
|
||||
struct mutex lock; /* MCAM entries and counters update lock */
|
||||
@ -116,6 +147,7 @@ struct npc_mcam {
|
||||
u16 *entry2cntr_map;
|
||||
u16 *cntr2pfvf_map;
|
||||
u16 *cntr_refcnt;
|
||||
u16 *entry2target_pffunc;
|
||||
u8 keysize; /* MCAM keysize 112/224/448 bits */
|
||||
u8 banks; /* Number of MCAM banks */
|
||||
u8 banks_per_entry;/* Number of keywords in key */
|
||||
@ -128,6 +160,12 @@ struct npc_mcam {
|
||||
u16 hprio_count;
|
||||
u16 hprio_end;
|
||||
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
|
||||
/* fields present in the generated key */
|
||||
struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
|
||||
struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
|
||||
u64 tx_features;
|
||||
u64 rx_features;
|
||||
struct list_head mcam_rules;
|
||||
};
|
||||
|
||||
/* Structure for per RVU func info ie PF/VF */
|
||||
@ -171,16 +209,15 @@ struct rvu_pfvf {
|
||||
u16 maxlen;
|
||||
u16 minlen;
|
||||
|
||||
u8 pf_set_vf_cfg;
|
||||
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
|
||||
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
|
||||
|
||||
/* Broadcast pkt replication info */
|
||||
u16 bcast_mce_idx;
|
||||
struct nix_mce_list bcast_mce_list;
|
||||
|
||||
/* VLAN offload */
|
||||
struct mcam_entry entry;
|
||||
int rxvlan_index;
|
||||
bool rxvlan;
|
||||
struct rvu_npc_mcam_rule *def_ucast_rule;
|
||||
|
||||
bool cgx_in_use; /* this PF/VF using CGX? */
|
||||
int cgx_users; /* number of cgx users - used only by PFs */
|
||||
@ -224,6 +261,13 @@ struct nix_lso {
|
||||
u8 in_use;
|
||||
};
|
||||
|
||||
struct nix_txvlan {
|
||||
#define NIX_TX_VTAG_DEF_MAX 0x400
|
||||
struct rsrc_bmap rsrc;
|
||||
u16 *entry2pfvf_map;
|
||||
struct mutex rsrc_lock; /* Serialize resource alloc/free */
|
||||
};
|
||||
|
||||
struct nix_hw {
|
||||
int blkaddr;
|
||||
struct rvu *rvu;
|
||||
@ -232,6 +276,7 @@ struct nix_hw {
|
||||
struct nix_flowkey flowkey;
|
||||
struct nix_mark_format mark_format;
|
||||
struct nix_lso lso;
|
||||
struct nix_txvlan txvlan;
|
||||
};
|
||||
|
||||
/* RVU block's capabilities or functionality,
|
||||
@ -445,6 +490,7 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
|
||||
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
|
||||
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
|
||||
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
|
||||
int rvu_get_num_lbk_chans(void);
|
||||
|
||||
/* RVU HW reg validation */
|
||||
enum regmap_block {
|
||||
@ -503,6 +549,7 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
|
||||
int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
|
||||
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
|
||||
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
|
||||
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
|
||||
|
||||
/* NPC APIs */
|
||||
int rvu_npc_init(struct rvu *rvu);
|
||||
@ -519,8 +566,8 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
|
||||
int nixlf, u64 chan);
|
||||
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
|
||||
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
|
||||
@ -535,6 +582,20 @@ bool is_npc_intf_tx(u8 intf);
|
||||
bool is_npc_intf_rx(u8 intf);
|
||||
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
|
||||
int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
|
||||
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
|
||||
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
|
||||
const char *npc_get_field_name(u8 hdr);
|
||||
bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
|
||||
u16 pcifunc, u8 intf, struct mcam_entry *entry,
|
||||
int *entry_index);
|
||||
int npc_get_bank(struct npc_mcam *mcam, int index);
|
||||
void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
|
||||
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
|
||||
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, int index, bool enable);
|
||||
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, u16 src, struct mcam_entry *entry,
|
||||
u8 *intf, u8 *ena);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void rvu_dbg_init(struct rvu *rvu);
|
||||
|
@ -1770,6 +1770,198 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
|
||||
|
||||
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
|
||||
|
||||
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
|
||||
struct rvu_npc_mcam_rule *rule)
|
||||
{
|
||||
u8 bit;
|
||||
|
||||
for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
|
||||
seq_printf(s, "\t%s ", npc_get_field_name(bit));
|
||||
switch (bit) {
|
||||
case NPC_DMAC:
|
||||
seq_printf(s, "%pM ", rule->packet.dmac);
|
||||
seq_printf(s, "mask %pM\n", rule->mask.dmac);
|
||||
break;
|
||||
case NPC_SMAC:
|
||||
seq_printf(s, "%pM ", rule->packet.smac);
|
||||
seq_printf(s, "mask %pM\n", rule->mask.smac);
|
||||
break;
|
||||
case NPC_ETYPE:
|
||||
seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
|
||||
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
|
||||
break;
|
||||
case NPC_OUTER_VID:
|
||||
seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
|
||||
seq_printf(s, "mask 0x%x\n",
|
||||
ntohs(rule->mask.vlan_tci));
|
||||
break;
|
||||
case NPC_TOS:
|
||||
seq_printf(s, "%d ", rule->packet.tos);
|
||||
seq_printf(s, "mask 0x%x\n", rule->mask.tos);
|
||||
break;
|
||||
case NPC_SIP_IPV4:
|
||||
seq_printf(s, "%pI4 ", &rule->packet.ip4src);
|
||||
seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
|
||||
break;
|
||||
case NPC_DIP_IPV4:
|
||||
seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
|
||||
seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
|
||||
break;
|
||||
case NPC_SIP_IPV6:
|
||||
seq_printf(s, "%pI6 ", rule->packet.ip6src);
|
||||
seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
|
||||
break;
|
||||
case NPC_DIP_IPV6:
|
||||
seq_printf(s, "%pI6 ", rule->packet.ip6dst);
|
||||
seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
|
||||
break;
|
||||
case NPC_SPORT_TCP:
|
||||
case NPC_SPORT_UDP:
|
||||
case NPC_SPORT_SCTP:
|
||||
seq_printf(s, "%d ", ntohs(rule->packet.sport));
|
||||
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
|
||||
break;
|
||||
case NPC_DPORT_TCP:
|
||||
case NPC_DPORT_UDP:
|
||||
case NPC_DPORT_SCTP:
|
||||
seq_printf(s, "%d ", ntohs(rule->packet.dport));
|
||||
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
|
||||
struct rvu_npc_mcam_rule *rule)
|
||||
{
|
||||
if (rule->intf == NIX_INTF_TX) {
|
||||
switch (rule->tx_action.op) {
|
||||
case NIX_TX_ACTIONOP_DROP:
|
||||
seq_puts(s, "\taction: Drop\n");
|
||||
break;
|
||||
case NIX_TX_ACTIONOP_UCAST_DEFAULT:
|
||||
seq_puts(s, "\taction: Unicast to default channel\n");
|
||||
break;
|
||||
case NIX_TX_ACTIONOP_UCAST_CHAN:
|
||||
seq_printf(s, "\taction: Unicast to channel %d\n",
|
||||
rule->tx_action.index);
|
||||
break;
|
||||
case NIX_TX_ACTIONOP_MCAST:
|
||||
seq_puts(s, "\taction: Multicast\n");
|
||||
break;
|
||||
case NIX_TX_ACTIONOP_DROP_VIOL:
|
||||
seq_puts(s, "\taction: Lockdown Violation Drop\n");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
};
|
||||
} else {
|
||||
switch (rule->rx_action.op) {
|
||||
case NIX_RX_ACTIONOP_DROP:
|
||||
seq_puts(s, "\taction: Drop\n");
|
||||
break;
|
||||
case NIX_RX_ACTIONOP_UCAST:
|
||||
seq_printf(s, "\taction: Direct to queue %d\n",
|
||||
rule->rx_action.index);
|
||||
break;
|
||||
case NIX_RX_ACTIONOP_RSS:
|
||||
seq_puts(s, "\taction: RSS\n");
|
||||
break;
|
||||
case NIX_RX_ACTIONOP_UCAST_IPSEC:
|
||||
seq_puts(s, "\taction: Unicast ipsec\n");
|
||||
break;
|
||||
case NIX_RX_ACTIONOP_MCAST:
|
||||
seq_puts(s, "\taction: Multicast\n");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
static const char *rvu_dbg_get_intf_name(int intf)
|
||||
{
|
||||
switch (intf) {
|
||||
case NIX_INTFX_RX(0):
|
||||
return "NIX0_RX";
|
||||
case NIX_INTFX_RX(1):
|
||||
return "NIX1_RX";
|
||||
case NIX_INTFX_TX(0):
|
||||
return "NIX0_TX";
|
||||
case NIX_INTFX_TX(1):
|
||||
return "NIX1_TX";
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
|
||||
{
|
||||
struct rvu_npc_mcam_rule *iter;
|
||||
struct rvu *rvu = s->private;
|
||||
struct npc_mcam *mcam;
|
||||
int pf, vf = -1;
|
||||
int blkaddr;
|
||||
u16 target;
|
||||
u64 hits;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
if (blkaddr < 0)
|
||||
return 0;
|
||||
|
||||
mcam = &rvu->hw->mcam;
|
||||
|
||||
mutex_lock(&mcam->lock);
|
||||
list_for_each_entry(iter, &mcam->mcam_rules, list) {
|
||||
pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
|
||||
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
|
||||
|
||||
if (iter->owner & RVU_PFVF_FUNC_MASK) {
|
||||
vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
|
||||
seq_printf(s, "VF%d", vf);
|
||||
}
|
||||
seq_puts(s, "\n");
|
||||
|
||||
seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
|
||||
"RX" : "TX");
|
||||
seq_printf(s, "\tinterface: %s\n",
|
||||
rvu_dbg_get_intf_name(iter->intf));
|
||||
seq_printf(s, "\tmcam entry: %d\n", iter->entry);
|
||||
|
||||
rvu_dbg_npc_mcam_show_flows(s, iter);
|
||||
if (iter->intf == NIX_INTF_RX) {
|
||||
target = iter->rx_action.pf_func;
|
||||
pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
|
||||
seq_printf(s, "\tForward to: PF%d ", pf);
|
||||
|
||||
if (target & RVU_PFVF_FUNC_MASK) {
|
||||
vf = (target & RVU_PFVF_FUNC_MASK) - 1;
|
||||
seq_printf(s, "VF%d", vf);
|
||||
}
|
||||
seq_puts(s, "\n");
|
||||
}
|
||||
|
||||
rvu_dbg_npc_mcam_show_action(s, iter);
|
||||
seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
|
||||
|
||||
if (!iter->has_cntr)
|
||||
continue;
|
||||
seq_printf(s, "\tcounter: %d\n", iter->cntr);
|
||||
|
||||
hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
|
||||
seq_printf(s, "\thits: %lld\n", hits);
|
||||
}
|
||||
mutex_unlock(&mcam->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
|
||||
|
||||
static void rvu_dbg_npc_init(struct rvu *rvu)
|
||||
{
|
||||
const struct device *dev = &rvu->pdev->dev;
|
||||
@ -1784,6 +1976,11 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
|
||||
if (!pfile)
|
||||
goto create_failed;
|
||||
|
||||
pfile = debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc,
|
||||
rvu, &rvu_dbg_npc_mcam_rules_fops);
|
||||
if (!pfile)
|
||||
goto create_failed;
|
||||
|
||||
pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
|
||||
rvu, &rvu_dbg_npc_rx_miss_act_fops);
|
||||
if (!pfile)
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include "npc.h"
|
||||
#include "cgx.h"
|
||||
|
||||
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
|
||||
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
|
||||
int type, int chan_id);
|
||||
|
||||
@ -302,7 +303,6 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
|
||||
|
||||
pfvf->maxlen = 0;
|
||||
pfvf->minlen = 0;
|
||||
pfvf->rxvlan = false;
|
||||
|
||||
/* Remove this PF_FUNC from bcast pkt replication list */
|
||||
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
|
||||
@ -1182,6 +1182,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
||||
/* Config Rx pkt length, csum checks and apad enable / disable */
|
||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
|
||||
|
||||
/* Configure pkind for TX parse config */
|
||||
cfg = NPC_TX_DEF_PKIND;
|
||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
|
||||
|
||||
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
|
||||
err = nix_interface_init(rvu, pcifunc, intf, nixlf);
|
||||
if (err)
|
||||
@ -1190,6 +1194,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
||||
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
|
||||
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
|
||||
|
||||
/* Configure RX VTAG Type 7 (strip) for vf vlan */
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
|
||||
VTAGSIZE_T4 | VTAG_STRIP);
|
||||
|
||||
goto exit;
|
||||
|
||||
free_mem:
|
||||
@ -1224,7 +1233,7 @@ exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
|
||||
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
@ -1243,6 +1252,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
|
||||
if (nixlf < 0)
|
||||
return NIX_AF_ERR_AF_LF_INVALID;
|
||||
|
||||
if (req->flags & NIX_LF_DISABLE_FLOWS)
|
||||
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
|
||||
else
|
||||
rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
|
||||
|
||||
/* Free any tx vtag def entries used by this NIX LF */
|
||||
if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
|
||||
nix_free_tx_vtag_entries(rvu, pcifunc);
|
||||
|
||||
nix_interface_deinit(rvu, pcifunc, nixlf);
|
||||
|
||||
/* Reset this NIX LF */
|
||||
@ -1971,9 +1989,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
|
||||
{
|
||||
u64 regval = req->vtag_size;
|
||||
|
||||
if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
|
||||
if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
|
||||
req->vtag_size > VTAGSIZE_T8)
|
||||
return -EINVAL;
|
||||
|
||||
/* RX VTAG Type 7 reserved for vf vlan */
|
||||
if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
|
||||
return NIX_AF_ERR_RX_VTAG_INUSE;
|
||||
|
||||
if (req->rx.capture_vtag)
|
||||
regval |= BIT_ULL(5);
|
||||
if (req->rx.strip_vtag)
|
||||
@ -1984,9 +2007,149 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
|
||||
u16 pcifunc, int index)
|
||||
{
|
||||
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
||||
struct nix_txvlan *vlan = &nix_hw->txvlan;
|
||||
|
||||
if (vlan->entry2pfvf_map[index] != pcifunc)
|
||||
return NIX_AF_ERR_PARAM;
|
||||
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
|
||||
|
||||
vlan->entry2pfvf_map[index] = 0;
|
||||
rvu_free_rsrc(&vlan->rsrc, index);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
|
||||
{
|
||||
struct nix_txvlan *vlan;
|
||||
struct nix_hw *nix_hw;
|
||||
int index, blkaddr;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
|
||||
if (blkaddr < 0)
|
||||
return;
|
||||
|
||||
nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
||||
vlan = &nix_hw->txvlan;
|
||||
|
||||
mutex_lock(&vlan->rsrc_lock);
|
||||
/* Scan all the entries and free the ones mapped to 'pcifunc' */
|
||||
for (index = 0; index < vlan->rsrc.max; index++) {
|
||||
if (vlan->entry2pfvf_map[index] == pcifunc)
|
||||
nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
|
||||
}
|
||||
mutex_unlock(&vlan->rsrc_lock);
|
||||
}
|
||||
|
||||
static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
|
||||
u64 vtag, u8 size)
|
||||
{
|
||||
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
||||
struct nix_txvlan *vlan = &nix_hw->txvlan;
|
||||
u64 regval;
|
||||
int index;
|
||||
|
||||
mutex_lock(&vlan->rsrc_lock);
|
||||
|
||||
index = rvu_alloc_rsrc(&vlan->rsrc);
|
||||
if (index < 0) {
|
||||
mutex_unlock(&vlan->rsrc_lock);
|
||||
return index;
|
||||
}
|
||||
|
||||
mutex_unlock(&vlan->rsrc_lock);
|
||||
|
||||
regval = size ? vtag : vtag << 32;
|
||||
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NIX_AF_TX_VTAG_DEFX_CTL(index), size);
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
|
||||
struct nix_vtag_config *req)
|
||||
{
|
||||
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
||||
struct nix_txvlan *vlan = &nix_hw->txvlan;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
int idx0 = req->tx.vtag0_idx;
|
||||
int idx1 = req->tx.vtag1_idx;
|
||||
int err;
|
||||
|
||||
if (req->tx.free_vtag0 && req->tx.free_vtag1)
|
||||
if (vlan->entry2pfvf_map[idx0] != pcifunc ||
|
||||
vlan->entry2pfvf_map[idx1] != pcifunc)
|
||||
return NIX_AF_ERR_PARAM;
|
||||
|
||||
mutex_lock(&vlan->rsrc_lock);
|
||||
|
||||
if (req->tx.free_vtag0) {
|
||||
err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
|
||||
if (err)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (req->tx.free_vtag1)
|
||||
err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&vlan->rsrc_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
|
||||
struct nix_vtag_config *req,
|
||||
struct nix_vtag_config_rsp *rsp)
|
||||
{
|
||||
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
||||
struct nix_txvlan *vlan = &nix_hw->txvlan;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
|
||||
if (req->tx.cfg_vtag0) {
|
||||
rsp->vtag0_idx =
|
||||
nix_tx_vtag_alloc(rvu, blkaddr,
|
||||
req->tx.vtag0, req->vtag_size);
|
||||
|
||||
if (rsp->vtag0_idx < 0)
|
||||
return NIX_AF_ERR_TX_VTAG_NOSPC;
|
||||
|
||||
vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
|
||||
}
|
||||
|
||||
if (req->tx.cfg_vtag1) {
|
||||
rsp->vtag1_idx =
|
||||
nix_tx_vtag_alloc(rvu, blkaddr,
|
||||
req->tx.vtag1, req->vtag_size);
|
||||
|
||||
if (rsp->vtag1_idx < 0)
|
||||
goto err_free;
|
||||
|
||||
vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
if (req->tx.cfg_vtag0)
|
||||
nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
|
||||
|
||||
return NIX_AF_ERR_TX_VTAG_NOSPC;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
|
||||
struct nix_vtag_config *req,
|
||||
struct msg_rsp *rsp)
|
||||
struct nix_vtag_config_rsp *rsp)
|
||||
{
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
int blkaddr, nixlf, err;
|
||||
@ -1996,12 +2159,21 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
|
||||
return err;
|
||||
|
||||
if (req->cfg_type) {
|
||||
/* rx vtag configuration */
|
||||
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
|
||||
if (err)
|
||||
return NIX_AF_ERR_PARAM;
|
||||
} else {
|
||||
/* TODO: handle tx vtag configuration */
|
||||
return 0;
|
||||
/* tx vtag configuration */
|
||||
if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
|
||||
(req->tx.free_vtag0 || req->tx.free_vtag1))
|
||||
return NIX_AF_ERR_PARAM;
|
||||
|
||||
if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
|
||||
return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
|
||||
|
||||
if (req->tx.free_vtag0 || req->tx.free_vtag1)
|
||||
return nix_tx_vtag_decfg(rvu, blkaddr, req);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -2239,6 +2411,31 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
|
||||
return nix_setup_bcast_tables(rvu, nix_hw);
|
||||
}
|
||||
|
||||
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
|
||||
{
|
||||
struct nix_txvlan *vlan = &nix_hw->txvlan;
|
||||
int err;
|
||||
|
||||
/* Allocate resource bimap for tx vtag def registers*/
|
||||
vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
|
||||
err = rvu_alloc_bitmap(&vlan->rsrc);
|
||||
if (err)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
|
||||
vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
|
||||
sizeof(u16), GFP_KERNEL);
|
||||
if (!vlan->entry2pfvf_map)
|
||||
goto free_mem;
|
||||
|
||||
mutex_init(&vlan->rsrc_lock);
|
||||
return 0;
|
||||
|
||||
free_mem:
|
||||
kfree(vlan->rsrc.bmap);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
|
||||
{
|
||||
struct nix_txsch *txsch;
|
||||
@ -2743,6 +2940,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
|
||||
struct nix_set_mac_addr *req,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
int blkaddr, nixlf, err;
|
||||
struct rvu_pfvf *pfvf;
|
||||
@ -2753,13 +2951,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
|
||||
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
|
||||
/* VF can't overwrite admin(PF) changes */
|
||||
if (from_vf && pfvf->pf_set_vf_cfg)
|
||||
return -EPERM;
|
||||
|
||||
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
|
||||
|
||||
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
|
||||
pfvf->rx_chan_base, req->mac_addr);
|
||||
|
||||
rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2806,9 +3006,6 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
|
||||
else
|
||||
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
|
||||
pfvf->rx_chan_base, allmulti);
|
||||
|
||||
rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2945,65 +3142,6 @@ linkcfg:
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
struct npc_mcam_alloc_entry_req alloc_req = { };
|
||||
struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
|
||||
struct npc_mcam_free_entry_req free_req = { };
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
int blkaddr, nixlf, err;
|
||||
struct rvu_pfvf *pfvf;
|
||||
|
||||
/* LBK VFs do not have separate MCAM UCAST entry hence
|
||||
* skip allocating rxvlan for them
|
||||
*/
|
||||
if (is_afvf(pcifunc))
|
||||
return 0;
|
||||
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
if (pfvf->rxvlan)
|
||||
return 0;
|
||||
|
||||
/* alloc new mcam entry */
|
||||
alloc_req.hdr.pcifunc = pcifunc;
|
||||
alloc_req.count = 1;
|
||||
|
||||
err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
|
||||
&alloc_rsp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* update entry to enable rxvlan offload */
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
|
||||
if (blkaddr < 0) {
|
||||
err = NIX_AF_ERR_AF_LF_INVALID;
|
||||
goto free_entry;
|
||||
}
|
||||
|
||||
nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
|
||||
if (nixlf < 0) {
|
||||
err = NIX_AF_ERR_AF_LF_INVALID;
|
||||
goto free_entry;
|
||||
}
|
||||
|
||||
pfvf->rxvlan_index = alloc_rsp.entry_list[0];
|
||||
/* all it means is that rxvlan_index is valid */
|
||||
pfvf->rxvlan = true;
|
||||
|
||||
err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
|
||||
if (err)
|
||||
goto free_entry;
|
||||
|
||||
return 0;
|
||||
free_entry:
|
||||
free_req.hdr.pcifunc = pcifunc;
|
||||
free_req.entry = alloc_rsp.entry_list[0];
|
||||
rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
|
||||
pfvf->rxvlan = false;
|
||||
return err;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
@ -3238,6 +3376,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = nix_setup_txvlan(rvu, nix_hw);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Configure segmentation offload formats */
|
||||
nix_setup_lso(rvu, nix_hw, blkaddr);
|
||||
|
||||
@ -3324,6 +3466,7 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
|
||||
{
|
||||
struct nix_txsch *txsch;
|
||||
struct nix_mcast *mcast;
|
||||
struct nix_txvlan *vlan;
|
||||
struct nix_hw *nix_hw;
|
||||
int lvl;
|
||||
|
||||
@ -3339,6 +3482,11 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
|
||||
kfree(txsch->schq.bmap);
|
||||
}
|
||||
|
||||
vlan = &nix_hw->txvlan;
|
||||
kfree(vlan->rsrc.bmap);
|
||||
mutex_destroy(&vlan->rsrc_lock);
|
||||
devm_kfree(rvu->dev, vlan->entry2pfvf_map);
|
||||
|
||||
mcast = &nix_hw->mcast;
|
||||
qmem_free(rvu->dev, mcast->mce_ctx);
|
||||
qmem_free(rvu->dev, mcast->mcast_buf);
|
||||
@ -3372,6 +3520,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
|
||||
|
||||
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
|
||||
|
||||
npc_mcam_enable_flows(rvu, pcifunc);
|
||||
|
||||
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
|
||||
}
|
||||
|
||||
@ -3387,6 +3537,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
|
||||
|
||||
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
|
||||
|
||||
npc_mcam_disable_flows(rvu, pcifunc);
|
||||
|
||||
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
|
||||
}
|
||||
|
||||
@ -3399,6 +3551,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
|
||||
ctx_req.hdr.pcifunc = pcifunc;
|
||||
|
||||
/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
|
||||
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
|
||||
rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
|
||||
nix_interface_deinit(rvu, pcifunc, nixlf);
|
||||
nix_rx_sync(rvu, blkaddr);
|
||||
nix_txschq_free(rvu, pcifunc);
|
||||
@ -3522,3 +3676,12 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
|
||||
{
|
||||
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
|
||||
|
||||
/* overwrite vf mac address with default_mac */
|
||||
if (from_vf)
|
||||
ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
|
||||
}
|
||||
|
@ -28,6 +28,8 @@
|
||||
|
||||
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
|
||||
#define NPC_HW_TSTAMP_OFFSET 8
|
||||
#define NPC_KEX_CHAN_MASK 0xFFFULL
|
||||
#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
|
||||
|
||||
static const char def_pfl_name[] = "default";
|
||||
|
||||
@ -63,6 +65,54 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int npc_mcam_verify_pf_func(struct rvu *rvu,
|
||||
struct mcam_entry *entry_data, u8 intf,
|
||||
u16 pcifunc)
|
||||
{
|
||||
u16 pf_func, pf_func_mask;
|
||||
|
||||
if (is_npc_intf_rx(intf))
|
||||
return 0;
|
||||
|
||||
pf_func_mask = (entry_data->kw_mask[0] >> 32) &
|
||||
NPC_KEX_PF_FUNC_MASK;
|
||||
pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
|
||||
|
||||
pf_func = be16_to_cpu((__force __be16)pf_func);
|
||||
if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
|
||||
((pf_func & ~RVU_PFVF_FUNC_MASK) !=
|
||||
(pcifunc & ~RVU_PFVF_FUNC_MASK)))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
|
||||
{
|
||||
int pf = rvu_get_pf(pcifunc);
|
||||
u8 cgx_id, lmac_id;
|
||||
int base = 0, end;
|
||||
|
||||
if (is_npc_intf_tx(intf))
|
||||
return 0;
|
||||
|
||||
if (is_afvf(pcifunc)) {
|
||||
end = rvu_get_num_lbk_chans();
|
||||
if (end < 0)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
|
||||
base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0);
|
||||
/* CGX mapped functions has maximum of 16 channels */
|
||||
end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF);
|
||||
}
|
||||
|
||||
if (channel < base || channel > end)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
|
||||
{
|
||||
int blkaddr;
|
||||
@ -169,7 +219,7 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
|
||||
return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
|
||||
}
|
||||
|
||||
static int npc_get_bank(struct npc_mcam *mcam, int index)
|
||||
int npc_get_bank(struct npc_mcam *mcam, int index)
|
||||
{
|
||||
int bank = index / mcam->banksize;
|
||||
|
||||
@ -191,8 +241,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
return (cfg & 1);
|
||||
}
|
||||
|
||||
static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, int index, bool enable)
|
||||
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, int index, bool enable)
|
||||
{
|
||||
int bank = npc_get_bank(mcam, index);
|
||||
int actbank = bank;
|
||||
@ -309,6 +359,93 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx,
|
||||
*cam0 = ~*cam1 & kw_mask;
|
||||
}
|
||||
|
||||
static void npc_fill_entryword(struct mcam_entry *entry, int idx,
|
||||
u64 cam0, u64 cam1)
|
||||
{
|
||||
/* Similar to npc_get_keyword, but fills mcam_entry structure from
|
||||
* CAM registers.
|
||||
*/
|
||||
switch (idx) {
|
||||
case 0:
|
||||
entry->kw[0] = cam1;
|
||||
entry->kw_mask[0] = cam1 ^ cam0;
|
||||
break;
|
||||
case 1:
|
||||
entry->kw[1] = cam1;
|
||||
entry->kw_mask[1] = cam1 ^ cam0;
|
||||
break;
|
||||
case 2:
|
||||
entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
|
||||
entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
|
||||
entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
|
||||
entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
|
||||
break;
|
||||
case 3:
|
||||
entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
|
||||
entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
|
||||
entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
|
||||
entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
|
||||
break;
|
||||
case 4:
|
||||
entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
|
||||
entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
|
||||
entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
|
||||
entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
|
||||
break;
|
||||
case 5:
|
||||
entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
|
||||
entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
|
||||
entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
|
||||
entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
|
||||
break;
|
||||
case 6:
|
||||
entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
|
||||
entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
|
||||
entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
|
||||
entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
|
||||
break;
|
||||
case 7:
|
||||
entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
|
||||
entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, int index,
|
||||
struct mcam_entry *entry)
|
||||
{
|
||||
u16 owner, target_func;
|
||||
struct rvu_pfvf *pfvf;
|
||||
int bank, nixlf;
|
||||
u64 rx_action;
|
||||
|
||||
owner = mcam->entry2pfvf_map[index];
|
||||
target_func = (entry->action >> 4) & 0xffff;
|
||||
/* return incase target is PF or LBK or rule owner is not PF */
|
||||
if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
|
||||
!(target_func & RVU_PFVF_FUNC_MASK))
|
||||
return;
|
||||
|
||||
pfvf = rvu_get_pfvf(rvu, target_func);
|
||||
mcam->entry2target_pffunc[index] = target_func;
|
||||
/* return if nixlf is not attached or initialized */
|
||||
if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule)
|
||||
return;
|
||||
|
||||
/* get VF ucast entry rule */
|
||||
nix_get_nixlf(rvu, target_func, &nixlf, NULL);
|
||||
index = npc_get_nixlf_mcam_index(mcam, target_func,
|
||||
nixlf, NIXLF_UCAST_ENTRY);
|
||||
bank = npc_get_bank(mcam, index);
|
||||
index &= (mcam->banksize - 1);
|
||||
|
||||
rx_action = rvu_read64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
|
||||
if (rx_action)
|
||||
entry->action = rx_action;
|
||||
}
|
||||
|
||||
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, int index, u8 intf,
|
||||
struct mcam_entry *entry, bool enable)
|
||||
@ -356,6 +493,11 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
|
||||
}
|
||||
|
||||
/* copy VF default entry action to the VF mcam entry */
|
||||
if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
|
||||
npc_get_default_entry_action(rvu, mcam, blkaddr, actindex,
|
||||
entry);
|
||||
|
||||
/* Set 'action' */
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
|
||||
@ -369,6 +511,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
|
||||
}

void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                         int blkaddr, u16 src,
                         struct mcam_entry *entry, u8 *intf, u8 *ena)
{
    int sbank = npc_get_bank(mcam, src);
    int bank, kw = 0;
    u64 cam0, cam1;

    src &= (mcam->banksize - 1);
    bank = sbank;

    for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
        cam1 = rvu_read64(rvu, blkaddr,
                          NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
        cam0 = rvu_read64(rvu, blkaddr,
                          NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
        npc_fill_entryword(entry, kw, cam0, cam1);

        cam1 = rvu_read64(rvu, blkaddr,
                          NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
        cam0 = rvu_read64(rvu, blkaddr,
                          NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
        npc_fill_entryword(entry, kw + 1, cam0, cam1);
    }

    entry->action = rvu_read64(rvu, blkaddr,
                               NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
    entry->vtag_action =
        rvu_read64(rvu, blkaddr,
                   NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
    *intf = rvu_read64(rvu, blkaddr,
                       NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
    *ena = rvu_read64(rvu, blkaddr,
                      NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
}

static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                                int blkaddr, u16 src, u16 dest)
{
@@ -423,11 +601,11 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
                                 int nixlf, u64 chan, u8 *mac_addr)
{
    struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    struct npc_install_flow_req req = { 0 };
    struct npc_install_flow_rsp rsp = { 0 };
    struct npc_mcam *mcam = &rvu->hw->mcam;
    struct mcam_entry entry = { {0} };
    struct nix_rx_action action;
    int blkaddr, index, kwi;
    u64 mac = 0;
    int blkaddr, index;

    /* AF's VFs work in promiscuous mode */
    if (is_afvf(pcifunc))
@@ -437,20 +615,9 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
    if (blkaddr < 0)
        return;

    for (index = ETH_ALEN - 1; index >= 0; index--)
        mac |= ((u64)*mac_addr++) << (8 * index);

    index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                     nixlf, NIXLF_UCAST_ENTRY);

    /* Match ingress channel and DMAC */
    entry.kw[0] = chan;
    entry.kw_mask[0] = 0xFFFULL;

    kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
    entry.kw[kwi] = mac;
    entry.kw_mask[kwi] = BIT_ULL(48) - 1;

    /* Don't change the action if entry is already enabled
     * Otherwise RSS action may get overwritten.
     */
@@ -463,20 +630,20 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
        action.pf_func = pcifunc;
    }

    entry.action = *(u64 *)&action;
    npc_config_mcam_entry(rvu, mcam, blkaddr, index,
                          pfvf->nix_rx_intf, &entry, true);
    req.default_rule = 1;
    ether_addr_copy(req.packet.dmac, mac_addr);
    eth_broadcast_addr((u8 *)&req.mask.dmac);
    req.features = BIT_ULL(NPC_DMAC);
    req.channel = chan;
    req.intf = pfvf->nix_rx_intf;
    req.op = action.op;
    req.hdr.pcifunc = 0; /* AF is requester */
    req.vf = action.pf_func;
    req.index = action.index;
    req.match_id = action.match_id;
    req.flow_key_alg = action.flow_key_alg;

    /* add VLAN matching, setup action and save entry back for later */
    entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
    entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;

    entry.vtag_action = VTAG0_VALID_BIT |
                        FIELD_PREP(VTAG0_TYPE_MASK, 0) |
                        FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
                        FIELD_PREP(VTAG0_RELPTR_MASK, 12);

    memcpy(&pfvf->entry, &entry, sizeof(entry));
    rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
|
||||
|
||||
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
|
||||
@ -632,12 +799,47 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
|
||||
}
|
||||
|
||||
static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, u16 pcifunc, u64 rx_action)
|
||||
{
|
||||
int actindex, index, bank;
|
||||
bool enable;
|
||||
|
||||
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
|
||||
return;
|
||||
|
||||
mutex_lock(&mcam->lock);
|
||||
for (index = 0; index < mcam->bmap_entries; index++) {
|
||||
if (mcam->entry2target_pffunc[index] == pcifunc) {
|
||||
bank = npc_get_bank(mcam, index);
|
||||
actindex = index;
|
||||
index &= (mcam->banksize - 1);
|
||||
|
||||
/* read vf flow entry enable status */
|
||||
enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
|
||||
actindex);
|
||||
/* disable before mcam entry update */
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
|
||||
false);
|
||||
/* update 'action' */
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
|
||||
rx_action);
|
||||
if (enable)
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr,
|
||||
actindex, true);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&mcam->lock);
|
||||
}
|
||||
|
||||
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
|
||||
int group, int alg_idx, int mcam_index)
|
||||
{
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
struct nix_rx_action action;
|
||||
int blkaddr, index, bank;
|
||||
struct rvu_pfvf *pfvf;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
if (blkaddr < 0)
|
||||
@ -674,6 +876,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
|
||||
|
||||
/* update the VF flow rule action with the VF default entry action */
|
||||
if (mcam_index < 0)
|
||||
npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
|
||||
*(u64 *)&action);
|
||||
|
||||
/* update the action change in default rule */
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
if (pfvf->def_ucast_rule)
|
||||
pfvf->def_ucast_rule->rx_action = action;
|
||||
|
||||
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
|
||||
nixlf, NIXLF_PROMISC_ENTRY);
|
||||
|
||||
@ -688,8 +900,6 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
|
||||
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
|
||||
*(u64 *)&action);
|
||||
}
|
||||
|
||||
rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
|
||||
}
|
||||
|
||||
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
|
||||
@ -741,8 +951,6 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
|
||||
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
|
||||
else
|
||||
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
|
||||
|
||||
rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
|
||||
}
|
||||
|
||||
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
|
||||
@ -757,7 +965,9 @@ void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
|
||||
|
||||
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
|
||||
{
|
||||
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
struct rvu_npc_mcam_rule *rule;
|
||||
int blkaddr;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
@ -766,12 +976,52 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
|
||||
|
||||
mutex_lock(&mcam->lock);
|
||||
|
||||
/* Disable and free all MCAM entries mapped to this 'pcifunc' */
|
||||
/* Disable MCAM entries directing traffic to this 'pcifunc' */
|
||||
list_for_each_entry(rule, &mcam->mcam_rules, list) {
|
||||
if (is_npc_intf_rx(rule->intf) &&
|
||||
rule->rx_action.pf_func == pcifunc) {
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr,
|
||||
rule->entry, false);
|
||||
rule->enable = false;
|
||||
/* Indicate that default rule is disabled */
|
||||
if (rule->default_rule)
|
||||
pfvf->def_ucast_rule = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&mcam->lock);
|
||||
|
||||
npc_mcam_disable_flows(rvu, pcifunc);
|
||||
|
||||
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
|
||||
}
|
||||
|
||||
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
|
||||
{
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
struct rvu_npc_mcam_rule *rule, *tmp;
|
||||
int blkaddr;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
if (blkaddr < 0)
|
||||
return;
|
||||
|
||||
mutex_lock(&mcam->lock);
|
||||
|
||||
/* Free all MCAM entries owned by this 'pcifunc' */
|
||||
npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
|
||||
|
||||
/* Free all MCAM counters mapped to this 'pcifunc' */
|
||||
/* Free all MCAM counters owned by this 'pcifunc' */
|
||||
npc_mcam_free_all_counters(rvu, mcam, pcifunc);
|
||||
|
||||
/* Delete MCAM entries owned by this 'pcifunc' */
|
||||
list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
|
||||
if (rule->owner == pcifunc && !rule->default_rule) {
|
||||
list_del(&rule->list);
|
||||
kfree(rule);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&mcam->lock);
|
||||
|
||||
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
|
||||
@ -1181,6 +1431,12 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
|
||||
if (!mcam->cntr_refcnt)
|
||||
goto free_mem;
|
||||
|
||||
/* Alloc memory for saving target device of mcam rule */
|
||||
mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
|
||||
sizeof(u16), GFP_KERNEL);
|
||||
if (!mcam->entry2target_pffunc)
|
||||
goto free_mem;
|
||||
|
||||
mutex_init(&mcam->lock);
|
||||
|
||||
return 0;
|
||||
@ -1350,12 +1606,19 @@ int rvu_npc_init(struct rvu *rvu)
|
||||
|
||||
rvu_npc_setup_interfaces(rvu, blkaddr);
|
||||
|
||||
/* Configure MKEX profile */
|
||||
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
|
||||
|
||||
err = npc_mcam_rsrcs_init(rvu, blkaddr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Configure MKEX profile */
|
||||
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
|
||||
err = npc_flow_steering_init(rvu, blkaddr);
|
||||
if (err) {
|
||||
dev_err(rvu->dev,
|
||||
"Incorrect mkex profile loaded using default mkex\n");
|
||||
npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1523,6 +1786,7 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
npc_unmap_mcam_entry_and_cntr(rvu, mcam,
|
||||
blkaddr, index,
|
||||
cntr);
|
||||
mcam->entry2target_pffunc[index] = 0x0;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1909,6 +2173,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
|
||||
goto exit;
|
||||
|
||||
mcam->entry2pfvf_map[req->entry] = 0;
|
||||
mcam->entry2target_pffunc[req->entry] = 0x0;
|
||||
npc_mcam_clear_bit(mcam, req->entry);
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
|
||||
|
||||
@ -1928,6 +2193,30 @@ exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
|
||||
struct npc_mcam_read_entry_req *req,
|
||||
struct npc_mcam_read_entry_rsp *rsp)
|
||||
{
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
int blkaddr, rc;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
if (blkaddr < 0)
|
||||
return NPC_MCAM_INVALID_REQ;
|
||||
|
||||
mutex_lock(&mcam->lock);
|
||||
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
|
||||
if (!rc) {
|
||||
npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
|
||||
&rsp->entry_data,
|
||||
&rsp->intf, &rsp->enable);
|
||||
}
|
||||
|
||||
mutex_unlock(&mcam->lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
|
||||
struct npc_mcam_write_entry_req *req,
|
||||
struct msg_rsp *rsp)
|
||||
@ -1935,6 +2224,7 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
|
||||
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
u16 channel, chan_mask;
|
||||
int blkaddr, rc;
|
||||
u8 nix_intf;
|
||||
|
||||
@ -1942,6 +2232,10 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
|
||||
if (blkaddr < 0)
|
||||
return NPC_MCAM_INVALID_REQ;
|
||||
|
||||
chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
|
||||
channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
|
||||
channel &= chan_mask;
|
||||
|
||||
mutex_lock(&mcam->lock);
|
||||
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
|
||||
if (rc)
|
||||
@ -1963,6 +2257,17 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
|
||||
else
|
||||
nix_intf = pfvf->nix_rx_intf;
|
||||
|
||||
if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
|
||||
rc = NPC_MCAM_INVALID_REQ;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
|
||||
pcifunc)) {
|
||||
rc = NPC_MCAM_INVALID_REQ;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
|
||||
&req->entry_data, req->enable_entry);
|
||||
|
||||
@ -2299,6 +2604,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
u16 entry = NPC_MCAM_ENTRY_INVALID;
|
||||
u16 cntr = NPC_MCAM_ENTRY_INVALID;
|
||||
u16 channel, chan_mask;
|
||||
int blkaddr, rc;
|
||||
u8 nix_intf;
|
||||
|
||||
@ -2309,6 +2615,17 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
|
||||
if (!is_npc_interface_valid(rvu, req->intf))
|
||||
return NPC_MCAM_INVALID_REQ;
|
||||
|
||||
chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
|
||||
channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
|
||||
channel &= chan_mask;
|
||||
|
||||
if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel))
|
||||
return NPC_MCAM_INVALID_REQ;
|
||||
|
||||
if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
|
||||
req->hdr.pcifunc))
|
||||
return NPC_MCAM_INVALID_REQ;
|
||||
|
||||
/* Try to allocate a MCAM entry */
|
||||
entry_req.hdr.pcifunc = req->hdr.pcifunc;
|
||||
entry_req.contig = true;
|
||||
@ -2413,26 +2730,72 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
|
||||
bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
|
||||
u16 pcifunc, u8 intf, struct mcam_entry *entry,
|
||||
int *index)
|
||||
{
|
||||
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
int blkaddr, index;
|
||||
bool enable;
|
||||
u8 nix_intf;
|
||||
|
||||
if (is_npc_intf_tx(intf))
|
||||
nix_intf = pfvf->nix_tx_intf;
|
||||
else
|
||||
nix_intf = pfvf->nix_rx_intf;
|
||||
|
||||
*index = npc_get_nixlf_mcam_index(mcam, pcifunc,
|
||||
nixlf, NIXLF_UCAST_ENTRY);
|
||||
/* dont force enable unicast entry */
|
||||
enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index);
|
||||
npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf,
|
||||
entry, enable);
|
||||
|
||||
return enable;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
|
||||
struct msg_req *req,
|
||||
struct npc_mcam_read_base_rule_rsp *rsp)
|
||||
{
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
int index, blkaddr, nixlf, rc = 0;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
struct rvu_pfvf *pfvf;
|
||||
u8 intf, enable;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
if (blkaddr < 0)
|
||||
return NIX_AF_ERR_AF_LF_INVALID;
|
||||
return NPC_MCAM_INVALID_REQ;
|
||||
|
||||
if (!pfvf->rxvlan)
|
||||
return 0;
|
||||
/* Return the channel number in case of PF */
|
||||
if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
rsp->entry.kw[0] = pfvf->rx_chan_base;
|
||||
rsp->entry.kw_mask[0] = 0xFFFULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Find the pkt steering rule installed by PF to this VF */
|
||||
mutex_lock(&mcam->lock);
|
||||
for (index = 0; index < mcam->bmap_entries; index++) {
|
||||
if (mcam->entry2target_pffunc[index] == pcifunc)
|
||||
goto read_entry;
|
||||
}
|
||||
|
||||
rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
|
||||
if (rc < 0) {
|
||||
mutex_unlock(&mcam->lock);
|
||||
goto out;
|
||||
}
|
||||
/* Read the default ucast entry if there is no pkt steering rule */
|
||||
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
|
||||
NIXLF_UCAST_ENTRY);
|
||||
pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
|
||||
enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
|
||||
npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
|
||||
pfvf->nix_rx_intf, &pfvf->entry, enable);
|
||||
|
||||
return 0;
|
||||
read_entry:
|
||||
/* Read the mcam entry */
|
||||
npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
|
||||
&enable);
|
||||
mutex_unlock(&mcam->lock);
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c (new file, 1334 lines; diff not shown because it is too large)
drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -921,4 +921,15 @@ enum nix_vtag_size {
    VTAGSIZE_T4 = 0x0,
    VTAGSIZE_T8 = 0x1,
};

enum nix_tx_vtag_op {
    NOP = 0x0,
    VTAG_INSERT = 0x1,
    VTAG_REPLACE = 0x2,
};

/* NIX RX VTAG actions */
#define VTAG_STRIP BIT_ULL(4)
#define VTAG_CAPTURE BIT_ULL(5)

#endif /* RVU_STRUCT_H */

drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o

octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
                     otx2_ptp.o
                     otx2_ptp.o otx2_flows.o
octeontx2_nicvf-y := otx2_vf.o

ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -191,10 +191,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
    if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
    else
        /* update dmac field in vlan offload rule */
        if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
            otx2_install_rxvlan_offload_flow(pfvf);
    } else {
        return -EPERM;
    }

    return 0;
}

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -18,6 +18,7 @@
#include <linux/timecounter.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include <rvu_trace.h>
@@ -205,6 +206,9 @@ struct otx2_vf_config {
    struct otx2_nic *pf;
    struct delayed_work link_event_work;
    bool intf_down; /* interface was either configured or not */
    u8 mac[ETH_ALEN];
    u16 vlan;
    int tx_vtag_idx;
};

struct flr_work {
@@ -228,6 +232,32 @@ struct otx2_ptp {

#define OTX2_HW_TIMESTAMP_LEN 8

struct otx2_mac_table {
    u8 addr[ETH_ALEN];
    u16 mcam_entry;
    bool inuse;
};

struct otx2_flow_config {
    u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
    u32 nr_flows;
#define OTX2_MAX_NTUPLE_FLOWS 32
#define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
                         OTX2_MAX_UNICAST_FLOWS + \
                         OTX2_MAX_VLAN_FLOWS)
    u32 ntuple_offset;
    u32 unicast_offset;
    u32 rx_vlan_offset;
    u32 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
    u32 ntuple_max_flows;
    struct list_head flow_list;
};

struct otx2_nic {
    void __iomem *reg_base;
    struct net_device *netdev;
@@ -238,6 +268,12 @@ struct otx2_nic {
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
    u64 flags;
@@ -266,6 +302,7 @@ struct otx2_nic {
    struct refill_work *refill_wrk;
    struct workqueue_struct *otx2_wq;
    struct work_struct rx_mode_work;
    struct otx2_mac_table *mac_table;

    /* Ethtool stuff */
    u32 msg_enable;
@@ -275,6 +312,8 @@ struct otx2_nic {

    struct otx2_ptp *ptp;
    struct hwtstamp_config tstamp;

    struct otx2_flow_config *flow_cfg;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -644,4 +683,24 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
                             int tx_queues, int rx_queues);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
                  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
                       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
                  struct ethtool_rx_flow_spec *fsp);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
                              struct npc_install_flow_req *req);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);

#endif /* OTX2_COMMON_H */

drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -551,6 +551,16 @@ static int otx2_get_rxnfc(struct net_device *dev,
        nfc->data = pfvf->hw.rx_queues;
        ret = 0;
        break;
    case ETHTOOL_GRXCLSRLCNT:
        nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
        ret = 0;
        break;
    case ETHTOOL_GRXCLSRULE:
        ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
        break;
    case ETHTOOL_GRXCLSRLALL:
        ret = otx2_get_all_flows(pfvf, nfc, rules);
        break;
    case ETHTOOL_GRXFH:
        return otx2_get_rss_hash_opts(pfvf, nfc);
    default:
@@ -560,6 +570,50 @@ static int otx2_get_rxnfc(struct net_device *dev,
}

static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
    bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
    struct otx2_nic *pfvf = netdev_priv(dev);
    int ret = -EOPNOTSUPP;

    switch (nfc->cmd) {
    case ETHTOOL_SRXFH:
        ret = otx2_set_rss_hash_opts(pfvf, nfc);
        break;
    case ETHTOOL_SRXCLSRLINS:
        if (netif_running(dev) && ntuple)
            ret = otx2_add_flow(pfvf, &nfc->fs);
        break;
    case ETHTOOL_SRXCLSRLDEL:
        if (netif_running(dev) && ntuple)
            ret = otx2_remove_flow(pfvf, nfc->fs.location);
        break;
    default:
        break;
    }

    return ret;
}

static int otx2vf_get_rxnfc(struct net_device *dev,
                            struct ethtool_rxnfc *nfc, u32 *rules)
{
    struct otx2_nic *pfvf = netdev_priv(dev);
    int ret = -EOPNOTSUPP;

    switch (nfc->cmd) {
    case ETHTOOL_GRXRINGS:
        nfc->data = pfvf->hw.rx_queues;
        ret = 0;
        break;
    case ETHTOOL_GRXFH:
        return otx2_get_rss_hash_opts(pfvf, nfc);
    default:
        break;
    }
    return ret;
}

static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
    struct otx2_nic *pfvf = netdev_priv(dev);
    int ret = -EOPNOTSUPP;
@@ -806,8 +860,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
    .get_sset_count = otx2vf_get_sset_count,
    .set_channels = otx2_set_channels,
    .get_channels = otx2_get_channels,
    .get_rxnfc = otx2_get_rxnfc,
    .set_rxnfc = otx2_set_rxnfc,
    .get_rxnfc = otx2vf_get_rxnfc,
    .set_rxnfc = otx2vf_set_rxnfc,
    .get_rxfh_key_size = otx2_get_rxfh_key_size,
    .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
    .get_rxfh = otx2_get_rxfh,

drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c (new file, 820 lines)
@@ -0,0 +1,820 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION 0x1

struct otx2_flow {
    struct ethtool_rx_flow_spec flow_spec;
    struct list_head list;
    u32 location;
    u16 entry;
    bool is_vf;
    int vf;
};

int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
    struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
    struct npc_mcam_alloc_entry_req *req;
    struct npc_mcam_alloc_entry_rsp *rsp;
    int vf_vlan_max_flows;
    int i;

    mutex_lock(&pfvf->mbox.lock);

    req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
    if (!req) {
        mutex_unlock(&pfvf->mbox.lock);
        return -ENOMEM;
    }

    vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
    req->contig = false;
    req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

    /* Send message to AF */
    if (otx2_sync_mbox_msg(&pfvf->mbox)) {
        mutex_unlock(&pfvf->mbox.lock);
        return -EINVAL;
    }

    rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
          (&pfvf->mbox.mbox, 0, &req->hdr);

    if (rsp->count != req->count) {
        netdev_info(pfvf->netdev,
                    "Unable to allocate %d MCAM entries, got %d\n",
                    req->count, rsp->count);
        /* support only ntuples here */
        flow_cfg->ntuple_max_flows = rsp->count;
        flow_cfg->ntuple_offset = 0;
        pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
    } else {
        flow_cfg->vf_vlan_offset = 0;
        flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
                                  vf_vlan_max_flows;
        flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
                                   OTX2_MAX_NTUPLE_FLOWS;
        flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
                                   OTX2_MAX_UNICAST_FLOWS;
        pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
        pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
        pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
        pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
    }

    for (i = 0; i < rsp->count; i++)
        flow_cfg->entry[i] = rsp->entry_list[i];

    pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

    mutex_unlock(&pfvf->mbox.lock);

    return 0;
}
|
||||
|
||||
int otx2_mcam_flow_init(struct otx2_nic *pf)
|
||||
{
|
||||
int err;
|
||||
|
||||
pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
|
||||
GFP_KERNEL);
|
||||
if (!pf->flow_cfg)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
|
||||
|
||||
pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
|
||||
|
||||
err = otx2_alloc_mcam_entries(pf);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
|
||||
* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
|
||||
if (!pf->mac_table)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void otx2_mcam_flow_del(struct otx2_nic *pf)
|
||||
{
|
||||
otx2_destroy_mcam_flows(pf);
|
||||
}
|
||||
|
||||
/* On success adds mcam entry
|
||||
* On failure enable promisous mode
|
||||
*/
|
||||
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pf->flow_cfg;
|
||||
struct npc_install_flow_req *req;
|
||||
int err, i;
|
||||
|
||||
if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
|
||||
return -ENOMEM;
|
||||
|
||||
/* dont have free mcam entries or uc list is greater than alloted */
|
||||
if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&pf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* unicast offset starts with 32 0..31 for ntuple */
|
||||
for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
|
||||
if (pf->mac_table[i].inuse)
|
||||
continue;
|
||||
ether_addr_copy(pf->mac_table[i].addr, mac);
|
||||
pf->mac_table[i].inuse = true;
|
||||
pf->mac_table[i].mcam_entry =
|
||||
flow_cfg->entry[i + flow_cfg->unicast_offset];
|
||||
req->entry = pf->mac_table[i].mcam_entry;
|
||||
break;
|
||||
}
|
||||
|
||||
ether_addr_copy(req->packet.dmac, mac);
|
||||
eth_broadcast_addr((u8 *)&req->mask.dmac);
|
||||
req->features = BIT_ULL(NPC_DMAC);
|
||||
req->channel = pf->hw.rx_chan_base;
|
||||
req->intf = NIX_INTF_RX;
|
||||
req->op = NIX_RX_ACTION_DEFAULT;
|
||||
req->set_cntr = 1;
|
||||
|
||||
err = otx2_sync_mbox_msg(&pf->mbox);
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
|
||||
{
|
||||
struct otx2_nic *pf = netdev_priv(netdev);
|
||||
|
||||
return otx2_do_add_macfilter(pf, mac);
|
||||
}
|
||||
|
||||
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
|
||||
int *mcam_entry)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
|
||||
if (!pf->mac_table[i].inuse)
|
||||
continue;
|
||||
|
||||
if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
|
||||
*mcam_entry = pf->mac_table[i].mcam_entry;
|
||||
pf->mac_table[i].inuse = false;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
|
||||
{
|
||||
struct otx2_nic *pf = netdev_priv(netdev);
|
||||
struct npc_delete_flow_req *req;
|
||||
int err, mcam_entry;
|
||||
|
||||
/* check does mcam entry exists for given mac */
|
||||
if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&pf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
req->entry = mcam_entry;
|
||||
/* Send message to AF */
|
||||
err = otx2_sync_mbox_msg(&pf->mbox);
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
|
||||
{
|
||||
struct otx2_flow *iter;
|
||||
|
||||
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
|
||||
if (iter->location == location)
|
||||
return iter;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
|
||||
{
|
||||
struct list_head *head = &pfvf->flow_cfg->flow_list;
|
||||
struct otx2_flow *iter;
|
||||
|
||||
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
|
||||
if (iter->location > flow->location)
|
||||
break;
|
||||
head = &iter->list;
|
||||
}
|
||||
|
||||
list_add(&flow->list, head);
|
||||
}
|
||||
|
||||
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
|
||||
u32 location)
|
||||
{
|
||||
struct otx2_flow *iter;
|
||||
|
||||
if (location >= pfvf->flow_cfg->ntuple_max_flows)
|
||||
return -EINVAL;
|
||||
|
||||
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
|
||||
if (iter->location == location) {
|
||||
nfc->fs = iter->flow_spec;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
|
||||
u32 *rule_locs)
|
||||
{
|
||||
u32 location = 0;
|
||||
int idx = 0;
|
||||
int err = 0;
|
||||
|
||||
nfc->data = pfvf->flow_cfg->ntuple_max_flows;
|
||||
while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
|
||||
err = otx2_get_flow(pfvf, nfc, location);
|
||||
if (!err)
|
||||
rule_locs[idx++] = location;
|
||||
location++;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
|
||||
struct npc_install_flow_req *req,
|
||||
u32 flow_type)
|
||||
{
|
||||
struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
|
||||
struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
|
||||
struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
|
||||
struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
|
||||
struct flow_msg *pmask = &req->mask;
|
||||
struct flow_msg *pkt = &req->packet;
|
||||
|
||||
switch (flow_type) {
|
||||
case IP_USER_FLOW:
|
||||
if (ipv4_usr_mask->ip4src) {
|
||||
memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
|
||||
sizeof(pkt->ip4src));
|
||||
memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
|
||||
sizeof(pmask->ip4src));
|
||||
req->features |= BIT_ULL(NPC_SIP_IPV4);
|
||||
}
|
||||
if (ipv4_usr_mask->ip4dst) {
|
||||
memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
|
||||
sizeof(pkt->ip4dst));
|
||||
memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
|
||||
sizeof(pmask->ip4dst));
|
||||
req->features |= BIT_ULL(NPC_DIP_IPV4);
|
||||
}
|
||||
break;
|
||||
case TCP_V4_FLOW:
|
||||
case UDP_V4_FLOW:
|
||||
case SCTP_V4_FLOW:
|
||||
if (ipv4_l4_mask->ip4src) {
|
||||
memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
|
||||
sizeof(pkt->ip4src));
|
||||
memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
|
||||
sizeof(pmask->ip4src));
|
||||
req->features |= BIT_ULL(NPC_SIP_IPV4);
|
||||
}
|
||||
if (ipv4_l4_mask->ip4dst) {
|
||||
memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
|
||||
sizeof(pkt->ip4dst));
|
||||
memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
|
||||
sizeof(pmask->ip4dst));
|
||||
req->features |= BIT_ULL(NPC_DIP_IPV4);
|
||||
}
|
||||
if (ipv4_l4_mask->psrc) {
|
||||
memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
|
||||
sizeof(pkt->sport));
|
||||
memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
|
||||
sizeof(pmask->sport));
|
||||
if (flow_type == UDP_V4_FLOW)
|
||||
req->features |= BIT_ULL(NPC_SPORT_UDP);
|
||||
else if (flow_type == TCP_V4_FLOW)
|
||||
req->features |= BIT_ULL(NPC_SPORT_TCP);
|
||||
else
|
||||
req->features |= BIT_ULL(NPC_SPORT_SCTP);
|
||||
}
|
||||
if (ipv4_l4_mask->pdst) {
|
||||
memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
|
||||
sizeof(pkt->dport));
|
||||
memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
|
||||
sizeof(pmask->dport));
|
||||
if (flow_type == UDP_V4_FLOW)
|
||||
req->features |= BIT_ULL(NPC_DPORT_UDP);
|
||||
else if (flow_type == TCP_V4_FLOW)
|
||||
req->features |= BIT_ULL(NPC_DPORT_TCP);
|
||||
else
|
||||
req->features |= BIT_ULL(NPC_DPORT_SCTP);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
|
||||
struct npc_install_flow_req *req,
|
||||
u32 flow_type)
|
||||
{
|
||||
struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
|
||||
struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
|
||||
struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
|
||||
struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
|
||||
struct flow_msg *pmask = &req->mask;
|
||||
struct flow_msg *pkt = &req->packet;
|
||||
|
||||
switch (flow_type) {
|
||||
case IPV6_USER_FLOW:
|
||||
if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
|
||||
memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
|
||||
sizeof(pkt->ip6src));
|
||||
memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
|
||||
sizeof(pmask->ip6src));
|
||||
req->features |= BIT_ULL(NPC_SIP_IPV6);
|
||||
}
|
||||
if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
|
||||
memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
|
||||
sizeof(pkt->ip6dst));
|
||||
memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
|
||||
sizeof(pmask->ip6dst));
|
||||
req->features |= BIT_ULL(NPC_DIP_IPV6);
|
||||
}
|
||||
break;
|
||||
case TCP_V6_FLOW:
|
||||
case UDP_V6_FLOW:
|
||||
case SCTP_V6_FLOW:
|
||||
if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
|
||||
memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
|
||||
sizeof(pkt->ip6src));
|
||||
memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
|
||||
sizeof(pmask->ip6src));
|
||||
req->features |= BIT_ULL(NPC_SIP_IPV6);
|
||||
}
|
||||
if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
|
||||
memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
|
||||
sizeof(pkt->ip6dst));
|
||||
memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
|
||||
sizeof(pmask->ip6dst));
|
||||
req->features |= BIT_ULL(NPC_DIP_IPV6);
|
||||
}
|
||||
if (ipv6_l4_mask->psrc) {
|
||||
memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
|
||||
sizeof(pkt->sport));
|
||||
memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
|
||||
sizeof(pmask->sport));
|
||||
if (flow_type == UDP_V6_FLOW)
|
||||
req->features |= BIT_ULL(NPC_SPORT_UDP);
|
||||
else if (flow_type == TCP_V6_FLOW)
|
||||
req->features |= BIT_ULL(NPC_SPORT_TCP);
|
||||
else
|
||||
req->features |= BIT_ULL(NPC_SPORT_SCTP);
|
||||
}
|
||||
if (ipv6_l4_mask->pdst) {
|
||||
memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
|
||||
sizeof(pkt->dport));
|
||||
memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
|
||||
sizeof(pmask->dport));
|
||||
if (flow_type == UDP_V6_FLOW)
|
||||
req->features |= BIT_ULL(NPC_DPORT_UDP);
|
||||
else if (flow_type == TCP_V6_FLOW)
|
||||
req->features |= BIT_ULL(NPC_DPORT_TCP);
|
||||
else
|
||||
req->features |= BIT_ULL(NPC_DPORT_SCTP);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
|
||||
struct npc_install_flow_req *req)
|
||||
{
|
||||
struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
|
||||
struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
|
||||
struct flow_msg *pmask = &req->mask;
|
||||
struct flow_msg *pkt = &req->packet;
|
||||
u32 flow_type;
|
||||
|
||||
flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
|
||||
switch (flow_type) {
|
||||
/* bits not set in mask are don't care */
|
||||
case ETHER_FLOW:
|
||||
if (!is_zero_ether_addr(eth_mask->h_source)) {
|
||||
ether_addr_copy(pkt->smac, eth_hdr->h_source);
|
||||
ether_addr_copy(pmask->smac, eth_mask->h_source);
|
||||
req->features |= BIT_ULL(NPC_SMAC);
|
||||
}
|
||||
if (!is_zero_ether_addr(eth_mask->h_dest)) {
|
||||
ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
|
||||
ether_addr_copy(pmask->dmac, eth_mask->h_dest);
|
||||
req->features |= BIT_ULL(NPC_DMAC);
|
||||
}
|
||||
if (eth_mask->h_proto) {
|
||||
memcpy(&pkt->etype, ð_hdr->h_proto,
|
||||
sizeof(pkt->etype));
|
||||
memcpy(&pmask->etype, ð_mask->h_proto,
|
||||
sizeof(pmask->etype));
|
||||
req->features |= BIT_ULL(NPC_ETYPE);
|
||||
}
|
||||
break;
|
||||
case IP_USER_FLOW:
|
||||
case TCP_V4_FLOW:
|
||||
case UDP_V4_FLOW:
|
||||
case SCTP_V4_FLOW:
|
||||
otx2_prepare_ipv4_flow(fsp, req, flow_type);
|
||||
break;
|
||||
case IPV6_USER_FLOW:
|
||||
case TCP_V6_FLOW:
|
||||
case UDP_V6_FLOW:
|
||||
case SCTP_V6_FLOW:
|
||||
otx2_prepare_ipv6_flow(fsp, req, flow_type);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
if (fsp->flow_type & FLOW_EXT) {
|
||||
if (fsp->m_ext.vlan_etype)
|
||||
return -EINVAL;
|
||||
if (fsp->m_ext.vlan_tci) {
|
||||
if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
|
||||
return -EINVAL;
|
||||
if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
|
||||
sizeof(pkt->vlan_tci));
|
||||
memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
|
||||
sizeof(pmask->vlan_tci));
|
||||
req->features |= BIT_ULL(NPC_OUTER_VID);
|
||||
}
|
||||
|
||||
/* Not Drop/Direct to queue but use action in default entry */
|
||||
if (fsp->m_ext.data[1] &&
|
||||
fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
|
||||
req->op = NIX_RX_ACTION_DEFAULT;
|
||||
}
|
||||
|
||||
if (fsp->flow_type & FLOW_MAC_EXT &&
|
||||
!is_zero_ether_addr(fsp->m_ext.h_dest)) {
|
||||
ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
|
||||
ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
|
||||
req->features |= BIT_ULL(NPC_DMAC);
|
||||
}
|
||||
|
||||
if (!req->features)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
|
||||
{
|
||||
u64 ring_cookie = flow->flow_spec.ring_cookie;
|
||||
struct npc_install_flow_req *req;
|
||||
int err, vf = 0;
|
||||
|
||||
mutex_lock(&pfvf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
err = otx2_prepare_flow_request(&flow->flow_spec, req);
|
||||
if (err) {
|
||||
/* free the allocated msg above */
|
||||
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
req->entry = flow->entry;
|
||||
req->intf = NIX_INTF_RX;
|
||||
req->set_cntr = 1;
|
||||
req->channel = pfvf->hw.rx_chan_base;
|
||||
if (ring_cookie == RX_CLS_FLOW_DISC) {
|
||||
req->op = NIX_RX_ACTIONOP_DROP;
|
||||
} else {
|
||||
/* change to unicast only if action of default entry is not
|
||||
* requested by user
|
||||
*/
|
||||
if (req->op != NIX_RX_ACTION_DEFAULT)
|
||||
req->op = NIX_RX_ACTIONOP_UCAST;
|
||||
req->index = ethtool_get_flow_spec_ring(ring_cookie);
|
||||
vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
|
||||
if (vf > pci_num_vf(pfvf->pdev)) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* ethtool ring_cookie has (VF + 1) for VF */
|
||||
if (vf) {
|
||||
req->vf = vf;
|
||||
flow->is_vf = true;
|
||||
flow->vf = vf;
|
||||
}
|
||||
|
||||
/* Send message to AF */
|
||||
err = otx2_sync_mbox_msg(&pfvf->mbox);
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
|
||||
u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
|
||||
struct otx2_flow *flow;
|
||||
bool new = false;
|
||||
int err;
|
||||
|
||||
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
|
||||
return -ENOMEM;
|
||||
|
||||
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
|
||||
return -EINVAL;
|
||||
|
||||
if (fsp->location >= flow_cfg->ntuple_max_flows)
|
||||
return -EINVAL;
|
||||
|
||||
flow = otx2_find_flow(pfvf, fsp->location);
|
||||
if (!flow) {
|
||||
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
|
||||
if (!flow)
|
||||
return -ENOMEM;
|
||||
flow->location = fsp->location;
|
||||
flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
|
||||
flow->location];
|
||||
new = true;
|
||||
}
|
||||
/* struct copy */
|
||||
flow->flow_spec = *fsp;
|
||||
|
||||
err = otx2_add_flow_msg(pfvf, flow);
|
||||
if (err) {
|
||||
if (new)
|
||||
kfree(flow);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* add the new flow installed to list */
|
||||
if (new) {
|
||||
otx2_add_flow_to_list(pfvf, flow);
|
||||
flow_cfg->nr_flows++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
|
||||
{
|
||||
struct npc_delete_flow_req *req;
|
||||
int err;
|
||||
|
||||
mutex_lock(&pfvf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
req->entry = entry;
|
||||
if (all)
|
||||
req->all = 1;
|
||||
|
||||
/* Send message to AF */
|
||||
err = otx2_sync_mbox_msg(&pfvf->mbox);
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
|
||||
struct otx2_flow *flow;
|
||||
int err;
|
||||
|
||||
if (location >= flow_cfg->ntuple_max_flows)
|
||||
return -EINVAL;
|
||||
|
||||
flow = otx2_find_flow(pfvf, location);
|
||||
if (!flow)
|
||||
return -ENOENT;
|
||||
|
||||
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
list_del(&flow->list);
|
||||
kfree(flow);
|
||||
flow_cfg->nr_flows--;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
|
||||
struct npc_delete_flow_req *req;
|
||||
struct otx2_flow *iter, *tmp;
|
||||
int err;
|
||||
|
||||
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&pfvf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
|
||||
req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
|
||||
flow_cfg->ntuple_max_flows - 1];
|
||||
err = otx2_sync_mbox_msg(&pfvf->mbox);
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
|
||||
list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
|
||||
list_del(&iter->list);
|
||||
kfree(iter);
|
||||
flow_cfg->nr_flows--;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
|
||||
struct npc_mcam_free_entry_req *req;
|
||||
struct otx2_flow *iter, *tmp;
|
||||
int err;
|
||||
|
||||
if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
|
||||
return 0;
|
||||
|
||||
/* remove all flows */
|
||||
err = otx2_remove_flow_msg(pfvf, 0, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
|
||||
list_del(&iter->list);
|
||||
kfree(iter);
|
||||
flow_cfg->nr_flows--;
|
||||
}
|
||||
|
||||
mutex_lock(&pfvf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
req->all = 1;
|
||||
/* Send message to AF to free MCAM entries */
|
||||
err = otx2_sync_mbox_msg(&pfvf->mbox);
|
||||
if (err) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
|
||||
struct npc_install_flow_req *req;
|
||||
int err;
|
||||
|
||||
mutex_lock(&pfvf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
|
||||
req->intf = NIX_INTF_RX;
|
||||
ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
|
||||
eth_broadcast_addr((u8 *)&req->mask.dmac);
|
||||
req->channel = pfvf->hw.rx_chan_base;
|
||||
req->op = NIX_RX_ACTION_DEFAULT;
|
||||
req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
|
||||
req->vtag0_valid = true;
|
||||
req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
|
||||
|
||||
/* Send message to AF */
|
||||
err = otx2_sync_mbox_msg(&pfvf->mbox);
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
|
||||
struct npc_delete_flow_req *req;
|
||||
int err;
|
||||
|
||||
mutex_lock(&pfvf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
|
||||
/* Send message to AF */
|
||||
err = otx2_sync_mbox_msg(&pfvf->mbox);
|
||||
mutex_unlock(&pfvf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
|
||||
{
|
||||
struct nix_vtag_config *req;
|
||||
struct mbox_msghdr *rsp_hdr;
|
||||
int err;
|
||||
|
||||
/* Dont have enough mcam entries */
|
||||
if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
|
||||
return -ENOMEM;
|
||||
|
||||
if (enable) {
|
||||
err = otx2_install_rxvlan_offload_flow(pf);
|
||||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
err = otx2_delete_rxvlan_offload_flow(pf);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
mutex_lock(&pf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
|
||||
if (!req) {
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* config strip, capture and size */
|
||||
req->vtag_size = VTAGSIZE_T4;
|
||||
req->cfg_type = 1; /* rx vlan cfg */
|
||||
req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
|
||||
req->rx.strip_vtag = enable;
|
||||
req->rx.capture_vtag = enable;
|
||||
|
||||
err = otx2_sync_mbox_msg(&pf->mbox);
|
||||
if (err) {
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
|
||||
if (IS_ERR(rsp_hdr)) {
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return PTR_ERR(rsp_hdr);
|
||||
}
|
||||
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return rsp_hdr->rc;
|
||||
}
|
@ -1278,6 +1278,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
|
||||
|
||||
static int otx2_init_hw_resources(struct otx2_nic *pf)
|
||||
{
|
||||
struct nix_lf_free_req *free_req;
|
||||
struct mbox *mbox = &pf->mbox;
|
||||
struct otx2_hw *hw = &pf->hw;
|
||||
struct msg_req *req;
|
||||
@ -1359,8 +1360,9 @@ err_free_rq_ptrs:
|
||||
otx2_aura_pool_free(pf);
|
||||
err_free_nix_lf:
|
||||
mutex_lock(&mbox->lock);
|
||||
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
|
||||
if (req) {
|
||||
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
|
||||
if (free_req) {
|
||||
free_req->flags = NIX_LF_DISABLE_FLOWS;
|
||||
if (otx2_sync_mbox_msg(mbox))
|
||||
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
|
||||
}
|
||||
@ -1379,6 +1381,7 @@ exit:
|
||||
static void otx2_free_hw_resources(struct otx2_nic *pf)
|
||||
{
|
||||
struct otx2_qset *qset = &pf->qset;
|
||||
struct nix_lf_free_req *free_req;
|
||||
struct mbox *mbox = &pf->mbox;
|
||||
struct otx2_cq_queue *cq;
|
||||
struct msg_req *req;
|
||||
@ -1419,8 +1422,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
|
||||
|
||||
mutex_lock(&mbox->lock);
|
||||
/* Reset NIX LF */
|
||||
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
|
||||
if (req) {
|
||||
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
|
||||
if (free_req) {
|
||||
free_req->flags = NIX_LF_DISABLE_FLOWS;
|
||||
if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
|
||||
free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
|
||||
if (otx2_sync_mbox_msg(mbox))
|
||||
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
|
||||
}
|
||||
@ -1562,6 +1568,9 @@ int otx2_open(struct net_device *netdev)
|
||||
|
||||
otx2_set_cints_affinity(pf);
|
||||
|
||||
if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
|
||||
otx2_enable_rxvlan(pf, true);
|
||||
|
||||
/* When reinitializing enable time stamping if it is enabled before */
|
||||
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
|
||||
pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
|
||||
@ -1716,10 +1725,20 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
|
||||
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
|
||||
struct net_device *netdev = pf->netdev;
|
||||
struct nix_rx_mode *req;
|
||||
bool promisc = false;
|
||||
|
||||
if (!(netdev->flags & IFF_UP))
|
||||
return;
|
||||
|
||||
if ((netdev->flags & IFF_PROMISC) ||
|
||||
(netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
|
||||
promisc = true;
|
||||
}
|
||||
|
||||
/* Write unicast address to mcam entries or del from mcam */
|
||||
if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
|
||||
__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
|
||||
|
||||
mutex_lock(&pf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
|
||||
if (!req) {
|
||||
@ -1729,8 +1748,7 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
|
||||
|
||||
req->mode = NIX_RX_MODE_UCAST;
|
||||
|
||||
/* We don't support MAC address filtering yet */
|
||||
if (netdev->flags & IFF_PROMISC)
|
||||
if (promisc)
|
||||
req->mode |= NIX_RX_MODE_PROMISC;
|
||||
else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
|
||||
req->mode |= NIX_RX_MODE_ALLMULTI;
|
||||
@ -1743,11 +1761,20 @@ static int otx2_set_features(struct net_device *netdev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
netdev_features_t changed = features ^ netdev->features;
|
||||
bool ntuple = !!(features & NETIF_F_NTUPLE);
|
||||
struct otx2_nic *pf = netdev_priv(netdev);
|
||||
|
||||
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
|
||||
return otx2_cgx_config_loopback(pf,
|
||||
features & NETIF_F_LOOPBACK);
|
||||
|
||||
if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
|
||||
return otx2_enable_rxvlan(pf,
|
||||
features & NETIF_F_HW_VLAN_CTAG_RX);
|
||||
|
||||
if ((changed & NETIF_F_NTUPLE) && !ntuple)
|
||||
otx2_destroy_ntuple_flows(pf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1903,6 +1930,245 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
|
||||
}
|
||||
}
|
||||
|
||||
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
|
||||
{
|
||||
struct npc_install_flow_req *req;
|
||||
int err;
|
||||
|
||||
mutex_lock(&pf->mbox.lock);
|
||||
req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
|
||||
if (!req) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ether_addr_copy(req->packet.dmac, mac);
|
||||
eth_broadcast_addr((u8 *)&req->mask.dmac);
|
||||
req->features = BIT_ULL(NPC_DMAC);
|
||||
req->channel = pf->hw.rx_chan_base;
|
||||
req->intf = NIX_INTF_RX;
|
||||
req->default_rule = 1;
|
||||
req->append = 1;
|
||||
req->vf = vf + 1;
|
||||
req->op = NIX_RX_ACTION_DEFAULT;
|
||||
|
||||
err = otx2_sync_mbox_msg(&pf->mbox);
|
||||
out:
|
||||
mutex_unlock(&pf->mbox.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
|
||||
{
|
||||
struct otx2_nic *pf = netdev_priv(netdev);
|
||||
struct pci_dev *pdev = pf->pdev;
|
||||
struct otx2_vf_config *config;
|
||||
int ret;
|
||||
|
||||
if (!netif_running(netdev))
|
||||
return -EAGAIN;
|
||||
|
||||
if (vf >= pci_num_vf(pdev))
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_valid_ether_addr(mac))
|
||||
return -EINVAL;
|
||||
|
||||
config = &pf->vf_configs[vf];
|
||||
ether_addr_copy(config->mac, mac);
|
||||
|
||||
ret = otx2_do_set_vf_mac(pf, vf, mac);
|
||||
if (ret == 0)
|
||||
dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
|
||||
__be16 proto)
|
||||
{
|
||||
struct otx2_flow_config *flow_cfg = pf->flow_cfg;
|
||||
struct nix_vtag_config_rsp *vtag_rsp;
|
||||
struct npc_delete_flow_req *del_req;
|
||||
struct nix_vtag_config *vtag_req;
|
||||
struct npc_install_flow_req *req;
|
||||
struct otx2_vf_config *config;
|
||||
int err = 0;
|
||||
u32 idx;
|
||||
|
||||
config = &pf->vf_configs[vf];
|
||||
|
||||
if (!vlan && !config->vlan)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&pf->mbox.lock);
|
||||
|
||||
/* free old tx vtag entry */
|
||||
if (config->vlan) {
|
||||
vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
|
||||
if (!vtag_req) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
vtag_req->cfg_type = 0;
|
||||
vtag_req->tx.free_vtag0 = 1;
|
||||
vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
|
||||
|
||||
err = otx2_sync_mbox_msg(&pf->mbox);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
        if (!vlan && config->vlan) {
                /* rx */
                del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
                if (!del_req) {
                        err = -ENOMEM;
                        goto out;
                }
                idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
                del_req->entry =
                        flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
                err = otx2_sync_mbox_msg(&pf->mbox);
                if (err)
                        goto out;

                /* tx */
                del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
                if (!del_req) {
                        err = -ENOMEM;
                        goto out;
                }
                idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
                del_req->entry =
                        flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
                err = otx2_sync_mbox_msg(&pf->mbox);

                goto out;
        }

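        /* Each VF owns a pair of MCAM entries starting at
         * flow_cfg->vf_vlan_offset: OTX2_VF_VLAN_RX_INDEX matches the outer
         * VLAN id on receive, OTX2_VF_VLAN_TX_INDEX inserts the tag on
         * transmit.
         */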
        /* rx */
        req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
        if (!req) {
                err = -ENOMEM;
                goto out;
        }

        idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
        req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
        req->packet.vlan_tci = htons(vlan);
        req->mask.vlan_tci = htons(VLAN_VID_MASK);
        /* af fills the destination mac addr */
        eth_broadcast_addr((u8 *)&req->mask.dmac);
        req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
        req->channel = pf->hw.rx_chan_base;
        req->intf = NIX_INTF_RX;
        req->vf = vf + 1;
        req->op = NIX_RX_ACTION_DEFAULT;
        req->vtag0_valid = true;
        req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
        req->set_cntr = 1;

        err = otx2_sync_mbox_msg(&pf->mbox);
        if (err)
                goto out;

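        /* Ask the AF for a TX vtag0 slot holding this proto/vlan pair; the
         * returned index is cached in config->tx_vtag_idx so it can be freed
         * when the VLAN is changed or cleared.
         */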
        /* tx */
        vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
        if (!vtag_req) {
                err = -ENOMEM;
                goto out;
        }

        /* configure tx vtag params */
        vtag_req->vtag_size = VTAGSIZE_T4;
        vtag_req->cfg_type = 0; /* tx vlan cfg */
        vtag_req->tx.cfg_vtag0 = 1;
        vtag_req->tx.vtag0 = (ntohs(proto) << 16) | vlan;

        err = otx2_sync_mbox_msg(&pf->mbox);
        if (err)
                goto out;

        vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
                        (&pf->mbox.mbox, 0, &vtag_req->hdr);
        if (IS_ERR(vtag_rsp)) {
                err = PTR_ERR(vtag_rsp);
                goto out;
        }
        config->tx_vtag_idx = vtag_rsp->vtag0_idx;

        req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
        if (!req) {
                err = -ENOMEM;
                goto out;
        }

        eth_zero_addr((u8 *)&req->mask.dmac);
        idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
        req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
        req->features = BIT_ULL(NPC_DMAC);
        req->channel = pf->hw.tx_chan_base;
        req->intf = NIX_INTF_TX;
        req->vf = vf + 1;
        req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
        req->vtag0_def = vtag_rsp->vtag0_idx;
        req->vtag0_op = VTAG_INSERT;
        req->set_cntr = 1;

        err = otx2_sync_mbox_msg(&pf->mbox);
out:
        config->vlan = vlan;
        mutex_unlock(&pf->mbox.lock);
        return err;
}

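/* .ndo_set_vf_vlan: only plain 802.1Q without a qos value is accepted, and
 * only when the AF reserved VF VLAN MCAM entries for this PF
 * (OTX2_FLAG_VF_VLAN_SUPPORT).
 */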
static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
                            __be16 proto)
{
        struct otx2_nic *pf = netdev_priv(netdev);
        struct pci_dev *pdev = pf->pdev;

        if (!netif_running(netdev))
                return -EAGAIN;

        if (vf >= pci_num_vf(pdev))
                return -EINVAL;

        /* qos is currently unsupported */
        if (vlan >= VLAN_N_VID || qos)
                return -EINVAL;

        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
                return -EOPNOTSUPP;

        return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}

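/* .ndo_get_vf_config: report the MAC and VLAN cached in the per-VF config */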
static int otx2_get_vf_config(struct net_device *netdev, int vf,
                              struct ifla_vf_info *ivi)
{
        struct otx2_nic *pf = netdev_priv(netdev);
        struct pci_dev *pdev = pf->pdev;
        struct otx2_vf_config *config;

        if (!netif_running(netdev))
                return -EAGAIN;

        if (vf >= pci_num_vf(pdev))
                return -EINVAL;

        config = &pf->vf_configs[vf];
        ivi->vf = vf;
        ether_addr_copy(ivi->mac, config->mac);
        ivi->vlan = config->vlan;

        return 0;
}

static const struct net_device_ops otx2_netdev_ops = {
        .ndo_open               = otx2_open,
        .ndo_stop               = otx2_stop,
@ -1914,6 +2180,9 @@ static const struct net_device_ops otx2_netdev_ops = {
        .ndo_tx_timeout         = otx2_tx_timeout,
        .ndo_get_stats64        = otx2_get_stats64,
        .ndo_do_ioctl           = otx2_ioctl,
        .ndo_set_vf_mac         = otx2_set_vf_mac,
        .ndo_set_vf_vlan        = otx2_set_vf_vlan,
        .ndo_get_vf_config      = otx2_get_vf_config,
};

static int otx2_wq_init(struct otx2_nic *pf)
@ -2110,6 +2379,25 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)

        netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

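        /* Reserve this PF's MCAM entries via the AF before advertising the
         * flow-steering features; NETIF_F_NTUPLE and IFF_UNICAST_FLT are only
         * exposed when the corresponding OTX2_FLAG_* capability bits are set.
         */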
        err = otx2_mcam_flow_init(pf);
        if (err)
                goto err_ptp_destroy;

        if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
                netdev->hw_features |= NETIF_F_NTUPLE;

        if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
                netdev->priv_flags |= IFF_UNICAST_FLT;

        /* Support TSO on tag interface */
        netdev->vlan_features |= netdev->features;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX;
        if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
                                       NETIF_F_HW_VLAN_STAG_RX;
        netdev->features |= netdev->hw_features;

        netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
        netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

@ -2122,7 +2410,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        err = register_netdev(netdev);
        if (err) {
                dev_err(dev, "Failed to register netdevice\n");
                goto err_ptp_destroy;
                goto err_del_mcam_entries;
        }

        err = otx2_wq_init(pf);
@ -2142,6 +2430,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)

err_unreg_netdev:
        unregister_netdev(netdev);
err_del_mcam_entries:
        otx2_mcam_flow_del(pf);
err_ptp_destroy:
        otx2_ptp_destroy(pf);
err_detach_rsrc:
@ -2285,6 +2575,8 @@ static void otx2_remove(struct pci_dev *pdev)

        pf = netdev_priv(netdev);

        pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

        if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
                otx2_config_hw_tx_tstamp(pf, false);
        if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
@ -2300,6 +2592,7 @@ static void otx2_remove(struct pci_dev *pdev)
        destroy_workqueue(pf->otx2_wq);

        otx2_ptp_destroy(pf);
        otx2_mcam_flow_del(pf);
        otx2_detach_resources(&pf->mbox);
        otx2_disable_mbox_intr(pf);
        otx2_pfaf_mbox_destroy(pf);

@ -556,6 +556,19 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                ext->tstmp = 1;
        }

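        /* HW VLAN insertion offload: the insert pointer is the EtherType
         * offset (ETH_HLEN - ETH_TLEN = 12 bytes, right after the MAC
         * addresses); 802.1Q tags use the vlan1 slot and 802.1AD tags the
         * vlan0 slot.
         */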
#define OTX2_VLAN_PTR_OFFSET     (ETH_HLEN - ETH_TLEN)
        if (skb_vlan_tag_present(skb)) {
                if (skb->vlan_proto == htons(ETH_P_8021Q)) {
                        ext->vlan1_ins_ena = 1;
                        ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
                        ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
                } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
                        ext->vlan0_ins_ena = 1;
                        ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
                        ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
                }
        }

        *offset += sizeof(*ext);
}

@ -871,6 +884,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
        }

        if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
                /* Insert vlan tag before giving pkt to tso */
                if (skb_vlan_tag_present(skb))
                        skb = __vlan_hwaccel_push_inside(skb);
                otx2_sq_append_tso(pfvf, sq, skb, qidx);
                return true;
        }

@ -558,6 +558,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                                     NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                                     NETIF_F_GSO_UDP_L4;
        netdev->features = netdev->hw_features;
        /* Support TSO on tag interface */
        netdev->vlan_features |= netdev->features;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX;
        netdev->features |= netdev->hw_features;

        netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
        netdev->watchdog_timeo = OTX2_TX_TIMEOUT;