Merge branch 'hns3-next'
Salil Mehta says:

====================
Misc. bug fixes & optimizations for HNS3 driver

This patch-set presents some bug fixes found during internal review and
system testing, along with some small optimizations.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 21ad117358
@@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};

/* below are per-VF vlan cfg subcodes */
@@ -316,7 +316,8 @@ struct hnae3_ae_ops {
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);

void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en);
void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);

void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -352,6 +353,7 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
int (*update_mta_status)(struct hnae3_handle *handle);

void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
@@ -415,15 +415,21 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
if (h->ae_algo->ops->set_promisc_mode) {
if (netdev->flags & IFF_PROMISC)
h->ae_algo->ops->set_promisc_mode(h, 1);
h->ae_algo->ops->set_promisc_mode(h, true, true);
else if (netdev->flags & IFF_ALLMULTI)
h->ae_algo->ops->set_promisc_mode(h, false, true);
else
h->ae_algo->ops->set_promisc_mode(h, 0);
h->ae_algo->ops->set_promisc_mode(h, false, false);
}
if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
netdev_err(netdev, "sync uc address fail\n");
if (netdev->flags & IFF_MULTICAST)
if (netdev->flags & IFF_MULTICAST) {
if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
netdev_err(netdev, "sync mc address fail\n");

if (h->ae_algo->ops->update_mta_status)
h->ae_algo->ops->update_mta_status(h);
}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
@@ -653,6 +659,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
}
}

/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
* hardware will not do the checksum offload when udp dest port is
* 4789.
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
union {
struct tcphdr *tcp;
struct udphdr *udp;
struct gre_base_hdr *gre;
unsigned char *hdr;
} l4;

l4.hdr = skb_transport_header(skb);

if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
return false;

skb_checksum_help(skb);

return true;
}

static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
@@ -741,6 +773,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;

hnae_set_field(*type_cs_vlan_tso,
HNS3_TXD_L4T_M,
HNS3_TXD_L4T_S,
@@ -1130,6 +1165,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
return -EADDRNOTAVAIL;

if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
netdev_info(netdev, "already using mac address %pM\n",
mac_addr->sa_data);
return 0;
}

ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
if (ret) {
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -2999,6 +3040,15 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
}

static void hns3_uninit_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;

if (h->ae_algo->ops->rm_uc_addr)
h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3127,6 +3177,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;

hns3_uninit_mac_addr(netdev);

free_netdev(netdev);
}
@@ -3443,6 +3495,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
priv->ring_data = NULL;

hns3_uninit_mac_addr(netdev);

return ret;
}
@@ -95,7 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret)
return ret;

h->ae_algo->ops->set_promisc_mode(h, en);
h->ae_algo->ops->set_promisc_mode(h, en, en);

return ret;
}
@@ -2288,8 +2288,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
struct hclge_vport *vport;
int mtu;
int ret;
int i;

ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
if (ret) {
@@ -2301,7 +2303,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;

/* Initialize the MTA table work mode */
hdev->accept_mta_mc = true;
hdev->enable_mta = true;
hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
@@ -2314,11 +2315,17 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}

ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
if (ret) {
dev_err(&hdev->pdev->dev,
"set mta filter mode fail ret=%d\n", ret);
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vport->accept_mta_mc = false;

memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"set mta filter mode fail ret=%d\n", ret);
return ret;
}
}

ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
@@ -2580,16 +2587,18 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
* mbx messages reported by this interrupt.
*/
hclge_mbx_task_schedule(hdev);

break;
default:
dev_dbg(&hdev->pdev->dev,
"received unknown or unhandled event of vector0\n");
dev_warn(&hdev->pdev->dev,
"received unknown or unhandled event of vector0\n");
break;
}

/* we should clear the source of interrupt */
hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
/* clear the source of interrupt if it is not cause by reset */
if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}

return IRQ_HANDLED;
}
@@ -2777,6 +2786,33 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
u32 clearval = 0;

switch (hdev->reset_type) {
case HNAE3_IMP_RESET:
clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
break;
case HNAE3_GLOBAL_RESET:
clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
break;
case HNAE3_CORE_RESET:
clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
break;
default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
hdev->reset_type);
break;
}

if (!clearval)
return;

hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset(struct hclge_dev *hdev)
{
/* perform reset of the stack & ae device for a client */
@@ -2789,6 +2825,8 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
rtnl_unlock();

hclge_clear_reset_cause(hdev);
} else {
/* schedule again to check pending resets later */
set_bit(hdev->reset_type, &hdev->reset_pending);
@@ -3580,13 +3618,15 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_promisc_param param;

hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
vport->vport_id);
hclge_cmd_set_promisc_mode(hdev, &param);
}
@@ -3728,9 +3768,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);

if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return 0;

ret = hclge_mac_start_phy(hdev);
if (ret)
return ret;
@@ -3746,9 +3783,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
hclge_mac_stop_phy(hdev);
return;
}

for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, false);
@@ -3972,9 +4012,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
return ret;
}

if (enable)
set_bit(idx, vport->mta_shadow);
else
clear_bit(idx, vport->mta_shadow);

return 0;
}

static int hclge_update_mta_status(struct hnae3_handle *handle)
{
unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
struct hclge_vport *vport = hclge_get_vport(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct netdev_hw_addr *ha;
u16 tbl_idx;

memset(mta_status, 0, sizeof(mta_status));

/* update mta_status from mc addr list */
netdev_for_each_mc_addr(ha, netdev) {
tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
set_bit(tbl_idx, mta_status);
}

return hclge_update_mta_status_common(vport, mta_status,
0, HCLGE_MTA_TBL_SIZE, true);
}

int hclge_update_mta_status_common(struct hclge_vport *vport,
unsigned long *status,
u16 idx,
u16 count,
bool update_filter)
{
struct hclge_dev *hdev = vport->back;
u16 update_max = idx + count;
u16 check_max;
int ret = 0;
bool used;
u16 i;

/* setup mta check range */
if (update_filter) {
i = 0;
check_max = HCLGE_MTA_TBL_SIZE;
} else {
i = idx;
check_max = update_max;
}

used = false;
/* check and update all mta item */
for (; i < check_max; i++) {
/* ignore unused item */
if (!test_bit(i, vport->mta_shadow))
continue;

/* if i in update range then update it */
if (i >= idx && i < update_max)
if (!test_bit(i - idx, status))
hclge_set_mta_table_item(vport, i, false);

if (!used && test_bit(i, vport->mta_shadow))
used = true;
}

/* no longer use mta, disable it */
if (vport->accept_mta_mc && update_filter && !used) {
ret = hclge_cfg_func_mta_filter(hdev,
vport->vport_id,
false);
if (ret)
dev_err(&hdev->pdev->dev,
"disable func mta filter fail ret=%d\n",
ret);
else
vport->accept_mta_mc = false;
}

return ret;
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -4242,9 +4361,25 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}

/* Set MTA table for this MAC address */
tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
status = hclge_set_mta_table_item(vport, tbl_idx, true);
/* If mc mac vlan table is full, use MTA table */
if (status == -ENOSPC) {
if (!vport->accept_mta_mc) {
status = hclge_cfg_func_mta_filter(hdev,
vport->vport_id,
true);
if (status) {
dev_err(&hdev->pdev->dev,
"set mta filter mode fail ret=%d\n",
status);
return status;
}
vport->accept_mta_mc = true;
}

/* Set MTA table for this MAC address */
tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
status = hclge_set_mta_table_item(vport, tbl_idx, true);
}

return status;
}
@@ -4264,7 +4399,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
struct hclge_desc desc[3];
u16 tbl_idx;

/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
@@ -4293,17 +4427,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);

} else {
/* This mac addr do not exist, can't delete it */
dev_err(&hdev->pdev->dev,
"Rm multicast mac addr failed, ret = %d.\n",
status);
return -EIO;
/* Maybe this mac address is in mta table, but it cannot be
* deleted here because an entry of mta represents an address
* range rather than a specific address. the delete action to
* all entries will take effect in update_mta_status called by
* hns3_nic_set_rx_mode.
*/
status = 0;
}

/* Set MTB table for this MAC address */
tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
status = hclge_set_mta_table_item(vport, tbl_idx, false);

return status;
}
@@ -4525,9 +4657,16 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
}

if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
if (!req0->resp_code || req0->resp_code == 1)
return 0;

if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
return 0;
}

dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%d.\n",
req0->resp_code);
@@ -5651,9 +5790,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}

/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);

dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6100,6 +6236,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
.update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
@@ -61,6 +61,8 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128

#define HCLGE_MTA_TBL_SIZE 4096

#define HCLGE_TQP_RESET_TRY_TIMES 10

#define HCLGE_PHY_PAGE_MDIX 0
@@ -559,7 +561,6 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Mutilcast filter enable */
bool accept_mta_mc; /* Whether accept mta filter multicast */

struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -620,6 +621,9 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;

bool accept_mta_mc; /* whether to accept mta filter multicast */
unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -637,6 +641,12 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
int hclge_update_mta_status_common(struct hclge_vport *vport,
unsigned long *status,
u16 idx,
u16 count,
bool update_filter);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
@@ -190,11 +190,12 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
bool en = req->msg[1] ? true : false;
bool en_uc = req->msg[1] ? true : false;
bool en_mc = req->msg[2] ? true : false;
struct hclge_promisc_param param;

/* always enable broadcast promisc bit */
hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
return hclge_cmd_set_promisc_mode(vport->back, &param);
}
@@ -230,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}

static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
u8 *msg, u8 idx, bool is_end)
{
#define HCLGE_MTA_STATUS_MSG_SIZE 13
#define HCLGE_MTA_STATUS_MSG_BITS \
(HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGE_MTA_STATUS_MSG_END_BITS \
(HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
u16 tbl_cnt;
u16 tbl_idx;
u8 msg_ofs;
u8 msg_bit;

tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
HCLGE_MTA_STATUS_MSG_BITS;

/* set msg field */
msg_ofs = 0;
msg_bit = 0;
memset(status, 0, sizeof(status));
for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
if (msg[msg_ofs] & BIT(msg_bit))
set_bit(tbl_idx, status);

msg_bit++;
if (msg_bit == BITS_PER_BYTE) {
msg_bit = 0;
msg_ofs++;
}
}

return hclge_update_mta_status_common(vport,
status, idx * HCLGE_MTA_STATUS_MSG_BITS,
tbl_cnt, is_end);
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
u8 resp_len = 0;
u8 resp_data;
int status;

if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
@@ -247,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
bool enable = mbx_req->msg[2];

status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
resp_data = hdev->mta_mac_sel_type;
resp_len = sizeof(u8);
gen_resp = true;
status = 0;
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
/* mta status update msg format
* msg[2.6 : 2.0] msg index
* msg[2.7] msg is end
* msg[15 : 3] mta status bits[103 : 0]
*/
bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;

status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
mbx_req->msg[2] & 0x7F,
is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
@@ -255,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
}

if (gen_resp)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
hclge_gen_resp_to_vf(vport, mbx_req, status,
&resp_data, resp_len);

return 0;
}
@@ -654,7 +654,8 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
bool en_uc_pmc, bool en_mc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
@@ -664,7 +665,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
req->msg[1] = en;
req->msg[1] = en_uc_pmc ? 1 : 0;
req->msg[2] = en_mc_pmc ? 1 : 0;

status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -674,11 +676,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
bool en_uc_pmc, bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

hclgevf_cmd_set_promisc_mode(hdev, en);
hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -736,6 +739,126 @@ static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
msg, 1, false, NULL, 0);
}

static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
int ret;

ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
NULL, 0, true, &resp_msg, sizeof(u8));

if (ret) {
dev_err(&hdev->pdev->dev,
"Read mta type fail, ret=%d.\n", ret);
return ret;
}

if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
dev_err(&hdev->pdev->dev,
"Read mta type invalid, resp=%d.\n", resp_msg);
return -EINVAL;
}

hdev->mta_mac_sel_type = resp_msg;

return 0;
}

static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
const u8 *addr)
{
u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
u16 high_val = addr[1] | (addr[0] << 8);

return (high_val >> rsh) & 0xfff;
}

static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
u16 tbl_cnt;
u16 tbl_idx;
u8 msg_cnt;
u8 msg_idx;
int ret;

msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
HCLGEVF_MTA_STATUS_MSG_BITS);
tbl_idx = 0;
msg_idx = 0;
while (msg_cnt--) {
u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
u8 *p = &msg[1];
u8 msg_ofs;
u8 msg_bit;

memset(msg, 0, sizeof(msg));

/* set index field */
msg[0] = 0x7F & msg_idx;

/* set end flag field */
if (msg_cnt == 0) {
msg[0] |= 0x80;
tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
} else {
tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
}

/* set status field */
msg_ofs = 0;
msg_bit = 0;
while (tbl_cnt--) {
if (test_bit(tbl_idx, status))
p[msg_ofs] |= BIT(msg_bit);

tbl_idx++;

msg_bit++;
if (msg_bit == BITS_PER_BYTE) {
msg_bit = 0;
msg_ofs++;
}
}

ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
msg, sizeof(msg), false, NULL, 0);
if (ret)
break;

msg_idx++;
}

return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct net_device *netdev = hdev->nic.kinfo.netdev;
struct netdev_hw_addr *ha;
u16 tbl_idx;

/* clear status */
memset(mta_status, 0, sizeof(mta_status));

/* update status from mc addr list */
netdev_for_each_mc_addr(ha, netdev) {
tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
set_bit(tbl_idx, mta_status);
}

return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1334,6 +1457,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
hclgevf_reset_tqp_stats(handle);
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
hclgevf_update_link_status(hdev, 0);
}
@@ -1665,12 +1789,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}

/* Initialize VF's MTA */
hdev->accept_mta_mc = true;
ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
/* Initialize mta type for this VF */
ret = hclgevf_cfg_func_mta_type(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to set mta filter mode\n", ret);
"failed(%d) to initialize MTA type\n", ret);
goto err_config;
}
@@ -1825,6 +1948,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
.update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
@@ -48,6 +48,9 @@
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)

#define HCLGEVF_MTA_TBL_SIZE 4096
#define HCLGEVF_MTA_TYPE_SEL_MAX 4

/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
@@ -152,6 +155,7 @@ struct hclgevf_dev {
int *vector_irq;

bool accept_mta_mc; /* whether to accept mta filter multicast */
u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */