Merge branch 'hns3-fixes-for-configuration-lost-problems'
Peng Li says:

====================
fixes for configuration lost problems

This patchset refactors some functions and fixes some bugs in order to
solve the configuration loss problem that occurs when the device is
reset or the channel number is changed.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit feaf751d8d
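Note: the core of the channel-change fix is a save-and-restore of the interrupt coalesce configuration, visible in the hns3_set_channels() and hns3_restore_coal() hunks below: vector 0's settings are snapshotted before the vectors are torn down, then copied into every newly allocated vector. A condensed sketch of that flow, assuming the struct layout and function names from the patchset itself (the teardown and re-setup calls in the middle are elided here; this is not the complete driver code):

	/* Condensed sketch of the save/restore flow from the diff below. */
	struct hns3_enet_coalesce {
		u16 int_gl;				/* interrupt gap limiter */
		u8 gl_adapt_enable;			/* self-adaptive GL flag */
		enum hns3_flow_level_range flow_level;
	};

	static void hns3_restore_coal(struct hns3_nic_priv *priv,
				      struct hns3_enet_coalesce *tx,
				      struct hns3_enet_coalesce *rx)
	{
		int i;

		/* Replay the saved settings into every new vector. */
		for (i = 0; i < priv->vector_num; i++) {
			memcpy(&priv->tqp_vector[i].tx_group.coal, tx, sizeof(*tx));
			memcpy(&priv->tqp_vector[i].rx_group.coal, rx, sizeof(*rx));
		}
	}

	/* In hns3_set_channels(): ethtool exposes only one coalesce
	 * configuration, so vector 0's copy is saved before the vectors
	 * are freed and restored after they are rebuilt.
	 */
	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, sizeof(tx_coal));
	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, sizeof(rx_coal));
	/* ... dealloc old vectors, hns3_modify_tqp_num() reallocates ... */
	hns3_restore_coal(priv, &tx_coal, &rx_coal);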
@@ -265,6 +265,8 @@ struct hnae3_ae_dev {
  *   Get tc size of handle
  * get_vector()
  *   Get vector number and vector information
+ * put_vector()
+ *   Put the vector in hdev
  * map_ring_to_vector()
  *   Map rings to vector
  * unmap_ring_from_vector()
@@ -375,6 +377,7 @@ struct hnae3_ae_ops {

 	int (*get_vector)(struct hnae3_handle *handle, u16 vector_num,
 			  struct hnae3_vector_info *vector_info);
+	int (*put_vector)(struct hnae3_handle *handle, int vector_num);
 	int (*map_ring_to_vector)(struct hnae3_handle *handle,
 				  int vector_num,
 				  struct hnae3_ring_chain_node *vr_chain);
@@ -168,8 +168,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
 	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
 	 */

-	if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
-	    !tqp_vector->rx_group.gl_adapt_enable)
+	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
+	    !tqp_vector->rx_group.coal.gl_adapt_enable)
 		/* According to the hardware, the range of rl_reg is
 		 * 0-59 and the unit is 4.
 		 */
@@ -205,23 +205,29 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
 	 */

 	/* Default: enable interrupt coalescing self-adaptive and GL */
-	tqp_vector->tx_group.gl_adapt_enable = 1;
-	tqp_vector->rx_group.gl_adapt_enable = 1;
+	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
+	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

-	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
-	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
-
-	hns3_set_vector_coalesce_tx_gl(tqp_vector,
-				       tqp_vector->tx_group.int_gl);
-	hns3_set_vector_coalesce_rx_gl(tqp_vector,
-				       tqp_vector->rx_group.int_gl);
+	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
+	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

 	/* Default: disable RL */
 	h->kinfo.int_rl_setting = 0;
-	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

-	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
-	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
+	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
+	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
 }

+static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
+				      struct hns3_nic_priv *priv)
+{
+	struct hnae3_handle *h = priv->ae_handle;
+
+	hns3_set_vector_coalesce_tx_gl(tqp_vector,
+				       tqp_vector->tx_group.coal.int_gl);
+	hns3_set_vector_coalesce_rx_gl(tqp_vector,
+				       tqp_vector->rx_group.coal.int_gl);
+	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+}
+
 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
@@ -2387,12 +2393,12 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
 	u16 new_int_gl;
 	int usecs;

-	if (!ring_group->int_gl)
+	if (!ring_group->coal.int_gl)
 		return false;

 	if (ring_group->total_packets == 0) {
-		ring_group->int_gl = HNS3_INT_GL_50K;
-		ring_group->flow_level = HNS3_FLOW_LOW;
+		ring_group->coal.int_gl = HNS3_INT_GL_50K;
+		ring_group->coal.flow_level = HNS3_FLOW_LOW;
 		return true;
 	}
@@ -2402,10 +2408,10 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
 	 * 20-1249MB/s high      (18000 ints/s)
 	 * > 40000pps ultra      (8000 ints/s)
 	 */
-	new_flow_level = ring_group->flow_level;
-	new_int_gl = ring_group->int_gl;
+	new_flow_level = ring_group->coal.flow_level;
+	new_int_gl = ring_group->coal.int_gl;
 	tqp_vector = ring_group->ring->tqp_vector;
-	usecs = (ring_group->int_gl << 1);
+	usecs = (ring_group->coal.int_gl << 1);
 	bytes_per_usecs = ring_group->total_bytes / usecs;
 	/* 1000000 microseconds */
 	packets_per_secs = ring_group->total_packets * 1000000 / usecs;
@@ -2452,9 +2458,9 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)

 	ring_group->total_bytes = 0;
 	ring_group->total_packets = 0;
-	ring_group->flow_level = new_flow_level;
-	if (new_int_gl != ring_group->int_gl) {
-		ring_group->int_gl = new_int_gl;
+	ring_group->coal.flow_level = new_flow_level;
+	if (new_int_gl != ring_group->coal.int_gl) {
+		ring_group->coal.int_gl = new_int_gl;
 		return true;
 	}
 	return false;
@@ -2466,18 +2472,18 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
 	bool rx_update, tx_update;

-	if (rx_group->gl_adapt_enable) {
+	if (rx_group->coal.gl_adapt_enable) {
 		rx_update = hns3_get_new_int_gl(rx_group);
 		if (rx_update)
 			hns3_set_vector_coalesce_rx_gl(tqp_vector,
-						       rx_group->int_gl);
+						       rx_group->coal.int_gl);
 	}

-	if (tx_group->gl_adapt_enable) {
+	if (tx_group->coal.gl_adapt_enable) {
 		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
 		if (tx_update)
 			hns3_set_vector_coalesce_tx_gl(tqp_vector,
-						       tx_group->int_gl);
+						       tx_group->coal.int_gl);
 	}
 }
@@ -2623,6 +2629,65 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
+static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+{
+	struct hnae3_ring_chain_node vector_ring_chain;
+	struct hnae3_handle *h = priv->ae_handle;
+	struct hns3_enet_tqp_vector *tqp_vector;
+	int ret = 0;
+	u16 i;
+
+	for (i = 0; i < priv->vector_num; i++) {
+		tqp_vector = &priv->tqp_vector[i];
+		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
+		tqp_vector->num_tqps = 0;
+	}
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		u16 vector_i = i % priv->vector_num;
+		u16 tqp_num = h->kinfo.num_tqps;
+
+		tqp_vector = &priv->tqp_vector[vector_i];
+
+		hns3_add_ring_to_group(&tqp_vector->tx_group,
+				       priv->ring_data[i].ring);
+
+		hns3_add_ring_to_group(&tqp_vector->rx_group,
+				       priv->ring_data[i + tqp_num].ring);
+
+		priv->ring_data[i].ring->tqp_vector = tqp_vector;
+		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+		tqp_vector->num_tqps++;
+	}
+
+	for (i = 0; i < priv->vector_num; i++) {
+		tqp_vector = &priv->tqp_vector[i];
+
+		tqp_vector->rx_group.total_bytes = 0;
+		tqp_vector->rx_group.total_packets = 0;
+		tqp_vector->tx_group.total_bytes = 0;
+		tqp_vector->tx_group.total_packets = 0;
+		tqp_vector->handle = h;
+
+		ret = hns3_get_vector_ring_chain(tqp_vector,
+						 &vector_ring_chain);
+		if (ret)
+			return ret;
+
+		ret = h->ae_algo->ops->map_ring_to_vector(h,
+			tqp_vector->vector_irq, &vector_ring_chain);
+
+		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+
+		if (ret)
+			return ret;
+
+		netif_napi_add(priv->netdev, &tqp_vector->napi,
+			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+	}
+
+	return 0;
+}
+
 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
 {
 	struct hnae3_handle *h = priv->ae_handle;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_vector_info *vector;
@@ -2646,53 +2711,17 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
 	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
 		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
 			     GFP_KERNEL);
-	if (!priv->tqp_vector)
-		return -ENOMEM;
-
-	for (i = 0; i < tqp_num; i++) {
-		u16 vector_i = i % vector_num;
-
-		tqp_vector = &priv->tqp_vector[vector_i];
-
-		hns3_add_ring_to_group(&tqp_vector->tx_group,
-				       priv->ring_data[i].ring);
-
-		hns3_add_ring_to_group(&tqp_vector->rx_group,
-				       priv->ring_data[i + tqp_num].ring);
-
-		tqp_vector->idx = vector_i;
-		tqp_vector->mask_addr = vector[vector_i].io_addr;
-		tqp_vector->vector_irq = vector[vector_i].vector;
-		tqp_vector->num_tqps++;
-
-		priv->ring_data[i].ring->tqp_vector = tqp_vector;
-		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+	if (!priv->tqp_vector) {
+		ret = -ENOMEM;
+		goto out;
 	}

-	for (i = 0; i < vector_num; i++) {
+	for (i = 0; i < priv->vector_num; i++) {
 		tqp_vector = &priv->tqp_vector[i];

-		tqp_vector->rx_group.total_bytes = 0;
-		tqp_vector->rx_group.total_packets = 0;
-		tqp_vector->tx_group.total_bytes = 0;
-		tqp_vector->tx_group.total_packets = 0;
+		tqp_vector->idx = i;
+		tqp_vector->mask_addr = vector[i].io_addr;
+		tqp_vector->vector_irq = vector[i].vector;
 		hns3_vector_gl_rl_init(tqp_vector, priv);
-		tqp_vector->handle = h;
-
-		ret = hns3_get_vector_ring_chain(tqp_vector,
-						 &vector_ring_chain);
-		if (ret)
-			goto out;
-
-		ret = h->ae_algo->ops->map_ring_to_vector(h,
-			tqp_vector->vector_irq, &vector_ring_chain);
-		if (ret)
-			goto out;
-
-		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
-
-		netif_napi_add(priv->netdev, &tqp_vector->napi,
-			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
 	}

 out:
@@ -2700,12 +2729,17 @@ out:
 	return ret;
 }

+static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
+{
+	group->ring = NULL;
+	group->count = 0;
+}
+
 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 {
 	struct hnae3_ring_chain_node vector_ring_chain;
 	struct hnae3_handle *h = priv->ae_handle;
 	struct hns3_enet_tqp_vector *tqp_vector;
-	struct pci_dev *pdev = h->pdev;
 	int i, ret;

 	for (i = 0; i < priv->vector_num; i++) {
@@ -2721,6 +2755,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		if (ret)
 			return ret;

+		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
+		if (ret)
+			return ret;
+
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

 		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2732,12 +2770,30 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		}

 		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;

+		hns3_clear_ring_group(&tqp_vector->rx_group);
+		hns3_clear_ring_group(&tqp_vector->tx_group);
 		netif_napi_del(&priv->tqp_vector[i].napi);
 	}

-	devm_kfree(&pdev->dev, priv->tqp_vector);
 	return 0;
 }

+static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+{
+	struct hnae3_handle *h = priv->ae_handle;
+	struct pci_dev *pdev = h->pdev;
+	int i, ret;
+
+	for (i = 0; i < priv->vector_num; i++) {
+		struct hns3_enet_tqp_vector *tqp_vector;
+
+		tqp_vector = &priv->tqp_vector[i];
+		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
+		if (ret)
+			return ret;
+	}
+
+	devm_kfree(&pdev->dev, priv->tqp_vector);
+	return 0;
+}
@@ -2967,13 +3023,8 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 		h->ae_algo->ops->reset_queue(h, i);

 		hns3_fini_ring(priv->ring_data[i].ring);
-		devm_kfree(priv->dev, priv->ring_data[i].ring);
 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
-		devm_kfree(priv->dev,
-			   priv->ring_data[i + h->kinfo.num_tqps].ring);
 	}
-	devm_kfree(priv->dev, priv->ring_data);

 	return 0;
 }
@@ -3058,6 +3109,12 @@ static int hns3_client_init(struct hnae3_handle *handle)
 		goto out_get_ring_cfg;
 	}

+	ret = hns3_nic_alloc_vector_data(priv);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_alloc_vector_data;
+	}
+
 	ret = hns3_nic_init_vector_data(priv);
 	if (ret) {
 		ret = -ENOMEM;
@@ -3086,8 +3143,10 @@ static int hns3_client_init(struct hnae3_handle *handle)
 out_reg_netdev_fail:
 out_init_ring_data:
 	(void)hns3_nic_uninit_vector_data(priv);
-	priv->ring_data = NULL;
 out_init_vector_data:
+	hns3_nic_dealloc_vector_data(priv);
+out_alloc_vector_data:
+	priv->ring_data = NULL;
 out_get_ring_cfg:
 	priv->ae_handle = NULL;
 	free_netdev(netdev);
@@ -3107,10 +3166,16 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	if (ret)
 		netdev_err(netdev, "uninit vector error\n");

+	ret = hns3_nic_dealloc_vector_data(priv);
+	if (ret)
+		netdev_err(netdev, "dealloc vector error\n");
+
 	ret = hns3_uninit_all_ring(priv);
 	if (ret)
 		netdev_err(netdev, "uninit ring error\n");

+	hns3_put_ring_config(priv);
+
 	priv->ring_data = NULL;

 	free_netdev(netdev);
@@ -3316,6 +3381,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 	if (ret)
 		netdev_err(netdev, "uninit ring error\n");

+	hns3_put_ring_config(priv);
+
 	priv->ring_data = NULL;

 	return ret;
@@ -3346,7 +3413,24 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
 	return ret;
 }

-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+static void hns3_restore_coal(struct hns3_nic_priv *priv,
+			      struct hns3_enet_coalesce *tx,
+			      struct hns3_enet_coalesce *rx)
+{
+	u16 vector_num = priv->vector_num;
+	int i;
+
+	for (i = 0; i < vector_num; i++) {
+		memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
+		       sizeof(struct hns3_enet_coalesce));
+		memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
+		       sizeof(struct hns3_enet_coalesce));
+	}
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
+			       struct hns3_enet_coalesce *tx,
+			       struct hns3_enet_coalesce *rx)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3360,6 +3444,12 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
 	if (ret)
 		return ret;

+	ret = hns3_nic_alloc_vector_data(priv);
+	if (ret)
+		goto err_alloc_vector;
+
+	hns3_restore_coal(priv, tx, rx);
+
 	ret = hns3_nic_init_vector_data(priv);
 	if (ret)
 		goto err_uninit_vector;
@@ -3374,6 +3464,8 @@ err_put_ring:
 	hns3_put_ring_config(priv);
 err_uninit_vector:
 	hns3_nic_uninit_vector_data(priv);
+err_alloc_vector:
+	hns3_nic_dealloc_vector_data(priv);
 	return ret;
 }
@@ -3388,6 +3480,7 @@ int hns3_set_channels(struct net_device *netdev,
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = hns3_get_handle(netdev);
 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
+	struct hns3_enet_coalesce tx_coal, rx_coal;
 	bool if_running = netif_running(netdev);
 	u32 new_tqp_num = ch->combined_count;
 	u16 org_tqp_num;
@@ -3421,12 +3514,26 @@ int hns3_set_channels(struct net_device *netdev,
 		goto open_netdev;
 	}

+	/* Changing the tqp num may also change the vector num,
+	 * ethtool only support setting and querying one coal
+	 * configuation for now, so save the vector 0' coal
+	 * configuation here in order to restore it.
+	 */
+	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
+	       sizeof(struct hns3_enet_coalesce));
+	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
+	       sizeof(struct hns3_enet_coalesce));
+
+	hns3_nic_dealloc_vector_data(priv);
+
 	hns3_uninit_all_ring(priv);
+	hns3_put_ring_config(priv);

 	org_tqp_num = h->kinfo.num_tqps;
-	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
 	if (ret) {
-		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
+					  &tx_coal, &rx_coal);
 		if (ret) {
 			/* If revert to old tqp failed, fatal error occurred */
 			dev_err(&netdev->dev,
@@ -460,15 +460,19 @@ enum hns3_link_mode_bits {
 #define HNS3_INT_RL_MAX			0x00EC
 #define HNS3_INT_RL_ENABLE_MASK		0x40

+struct hns3_enet_coalesce {
+	u16 int_gl;
+	u8 gl_adapt_enable;
+	enum hns3_flow_level_range flow_level;
+};
+
 struct hns3_enet_ring_group {
 	/* array of pointers to rings */
 	struct hns3_enet_ring *ring;
 	u64 total_bytes;	/* total bytes processed this group */
 	u64 total_packets;	/* total packets processed this group */
 	u16 count;
-	enum hns3_flow_level_range flow_level;
-	u16 int_gl;
-	u8 gl_adapt_enable;
+	struct hns3_enet_coalesce coal;
 };

 struct hns3_enet_tqp_vector {
@@ -905,11 +905,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
 	tx_vector = priv->ring_data[queue].ring->tqp_vector;
 	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

-	cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
-	cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+	cmd->use_adaptive_tx_coalesce =
+		tx_vector->tx_group.coal.gl_adapt_enable;
+	cmd->use_adaptive_rx_coalesce =
+		rx_vector->rx_group.coal.gl_adapt_enable;

-	cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
-	cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
+	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;

 	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
 	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
@@ -1029,14 +1031,18 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
 	tx_vector = priv->ring_data[queue].ring->tqp_vector;
 	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

-	tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
-	rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+	tx_vector->tx_group.coal.gl_adapt_enable =
+		cmd->use_adaptive_tx_coalesce;
+	rx_vector->rx_group.coal.gl_adapt_enable =
+		cmd->use_adaptive_rx_coalesce;

-	tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
-	rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+	tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
+	rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;

-	hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
-	hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+	hns3_set_vector_coalesce_tx_gl(tx_vector,
+				       tx_vector->tx_group.coal.int_gl);
+	hns3_set_vector_coalesce_rx_gl(rx_vector,
+				       rx_vector->rx_group.coal.int_gl);

 	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
 	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
@@ -144,6 +144,8 @@ static int hclge_map_update(struct hnae3_handle *h)
 	if (ret)
 		return ret;

+	hclge_rss_indir_init_cfg(hdev);
+
 	return hclge_rss_init_hw(hdev);
 }
@@ -2969,6 +2969,24 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
 	return -EINVAL;
 }

+static int hclge_put_vector(struct hnae3_handle *handle, int vector)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	int vector_id;
+
+	vector_id = hclge_get_vector_index(hdev, vector);
+	if (vector_id < 0) {
+		dev_err(&hdev->pdev->dev,
+			"Get vector index fail. vector_id =%d\n", vector_id);
+		return vector_id;
+	}
+
+	hclge_free_vector(hdev, vector_id);
+
+	return 0;
+}
+
 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
 {
 	return HCLGE_RSS_KEY_SIZE;
@@ -2979,31 +2997,6 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
 	return HCLGE_RSS_IND_TBL_SIZE;
 }

-static int hclge_get_rss_algo(struct hclge_dev *hdev)
-{
-	struct hclge_rss_config_cmd *req;
-	struct hclge_desc desc;
-	int rss_hash_algo;
-	int ret;
-
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
-
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Get link status error, status =%d\n", ret);
-		return ret;
-	}
-
-	req = (struct hclge_rss_config_cmd *)desc.data;
-	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
-
-	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
-		return ETH_RSS_HASH_TOP;
-
-	return -EINVAL;
-}
-
 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
 				  const u8 hfunc, const u8 *key)
 {
@@ -3042,7 +3035,7 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
 	return 0;
 }

-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
+static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
 {
 	struct hclge_rss_indirection_table_cmd *req;
 	struct hclge_desc desc;
@@ -3116,14 +3109,16 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
-	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
-	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+
+	/* Get the tuple cfg from pf */
+	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
+	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
+	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
+	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
+	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
+	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
+	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
+	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -3138,12 +3133,11 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
 			 u8 *key, u8 *hfunc)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
 	int i;

 	/* Get hash algorithm */
 	if (hfunc)
-		*hfunc = hclge_get_rss_algo(hdev);
+		*hfunc = vport->rss_algo;

 	/* Get the RSS Key required by the user */
 	if (key)
@@ -3167,8 +3161,6 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,

 	/* Set the RSS Hash Key if specififed by the user */
 	if (key) {
-		/* Update the shadow RSS key with user specified qids */
-		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
-
 		if (hfunc == ETH_RSS_HASH_TOP ||
 		    hfunc == ETH_RSS_HASH_NO_CHANGE)
@@ -3178,6 +3170,10 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
 		if (ret)
 			return ret;
+
+		/* Update the shadow RSS key with user specified qids */
+		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
+		vport->rss_algo = hash_algo;
 	}

 	/* Update the shadow RSS table with user specified qids */
@@ -3185,8 +3181,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
 		vport->rss_indirection_tbl[i] = indir[i];

 	/* Update the hardware */
-	ret = hclge_set_rss_indir_table(hdev, indir);
-	return ret;
+	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
 }

 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
@@ -3229,15 +3224,16 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
 		return -EINVAL;

 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Read rss tuple fail, status = %d\n", ret);
-		return ret;
-	}
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

-	hclge_cmd_reuse_desc(&desc, false);
+	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
+	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
+	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
+	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
+	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
+	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
+	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
+	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

 	tuple_sets = hclge_get_rss_hash_bits(nfc);
 	switch (nfc->flow_type) {
@@ -3274,52 +3270,49 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
 	}

 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
+	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Set rss tuple fail, status = %d\n", ret);
+		return ret;
+	}

-	return ret;
+	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+	return 0;
 }

 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
 			       struct ethtool_rxnfc *nfc)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-	struct hclge_rss_input_tuple_cmd *req;
-	struct hclge_desc desc;
 	u8 tuple_sets;
-	int ret;

 	nfc->data = 0;

-	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Read rss tuple fail, status = %d\n", ret);
-		return ret;
-	}
-
 	switch (nfc->flow_type) {
 	case TCP_V4_FLOW:
-		tuple_sets = req->ipv4_tcp_en;
+		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
 		break;
 	case UDP_V4_FLOW:
-		tuple_sets = req->ipv4_udp_en;
+		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
 		break;
 	case TCP_V6_FLOW:
-		tuple_sets = req->ipv6_tcp_en;
+		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
 		break;
 	case UDP_V6_FLOW:
-		tuple_sets = req->ipv6_udp_en;
+		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
 		break;
 	case SCTP_V4_FLOW:
-		tuple_sets = req->ipv4_sctp_en;
+		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
 		break;
 	case SCTP_V6_FLOW:
-		tuple_sets = req->ipv6_sctp_en;
+		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
 		break;
 	case IPV4_FLOW:
 	case IPV6_FLOW:
@@ -3354,50 +3347,28 @@ static int hclge_get_tc_size(struct hnae3_handle *handle)

 int hclge_rss_init_hw(struct hclge_dev *hdev)
 {
-	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
 	struct hclge_vport *vport = hdev->vport;
+	u8 *rss_indir = vport[0].rss_indirection_tbl;
+	u16 rss_size = vport[0].alloc_rss_size;
+	u8 *key = vport[0].rss_hash_key;
+	u8 hfunc = vport[0].rss_algo;
 	u16 tc_offset[HCLGE_MAX_TC_NUM];
-	u8 rss_key[HCLGE_RSS_KEY_SIZE];
 	u16 tc_valid[HCLGE_MAX_TC_NUM];
 	u16 tc_size[HCLGE_MAX_TC_NUM];
-	u32 *rss_indir = NULL;
-	u16 rss_size = 0, roundup_size;
-	const u8 *key;
-	int i, ret, j;
+	u16 roundup_size;
+	int i, ret;

-	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
-	if (!rss_indir)
-		return -ENOMEM;
-
-	/* Get default RSS key */
-	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
-
-	/* Initialize RSS indirect table for each vport */
-	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
-		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
-			vport[j].rss_indirection_tbl[i] =
-				i % vport[j].alloc_rss_size;
-
-			/* vport 0 is for PF */
-			if (j != 0)
-				continue;
-
-			rss_size = vport[j].alloc_rss_size;
-			rss_indir[i] = vport[j].rss_indirection_tbl[i];
-		}
-	}
 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
 	if (ret)
-		goto err;
+		return ret;

-	key = rss_key;
 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
 	if (ret)
-		goto err;
+		return ret;

 	ret = hclge_set_rss_input_tuple(hdev);
 	if (ret)
-		goto err;
+		return ret;

 	/* Each TC have the same queue size, and tc_size set to hardware is
 	 * the log2 of roundup power of two of rss_size, the acutal queue
@@ -3407,8 +3378,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
 		dev_err(&hdev->pdev->dev,
 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
 			rss_size);
-		ret = -EINVAL;
-		goto err;
+		return -EINVAL;
 	}

 	roundup_size = roundup_pow_of_two(rss_size);
@@ -3425,12 +3395,50 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
 		tc_offset[i] = rss_size * i;
 	}

-	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+}

-err:
-	kfree(rss_indir);
+void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int i, j;

-	return ret;
+	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
+		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+			vport[j].rss_indirection_tbl[i] =
+				i % vport[j].alloc_rss_size;
+	}
+}
+
+static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int i;
+
+	netdev_rss_key_fill(vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+
+	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+		vport[i].rss_tuple_sets.ipv4_tcp_en =
+			HCLGE_RSS_INPUT_TUPLE_OTHER;
+		vport[i].rss_tuple_sets.ipv4_udp_en =
+			HCLGE_RSS_INPUT_TUPLE_OTHER;
+		vport[i].rss_tuple_sets.ipv4_sctp_en =
+			HCLGE_RSS_INPUT_TUPLE_SCTP;
+		vport[i].rss_tuple_sets.ipv4_fragment_en =
+			HCLGE_RSS_INPUT_TUPLE_OTHER;
+		vport[i].rss_tuple_sets.ipv6_tcp_en =
+			HCLGE_RSS_INPUT_TUPLE_OTHER;
+		vport[i].rss_tuple_sets.ipv6_udp_en =
+			HCLGE_RSS_INPUT_TUPLE_OTHER;
+		vport[i].rss_tuple_sets.ipv6_sctp_en =
+			HCLGE_RSS_INPUT_TUPLE_SCTP;
+		vport[i].rss_tuple_sets.ipv6_fragment_en =
+			HCLGE_RSS_INPUT_TUPLE_OTHER;
+
+		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+	}
+
+	hclge_rss_indir_init_cfg(hdev);
 }

 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
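Note: the RSS hunks above all follow one pattern: every user-visible RSS setting (hash key, algorithm, indirection table, tuple sets) gains a shadow copy in struct hclge_vport, writes update the shadow only after the hardware accepts them, and hclge_rss_init_hw() reprograms the hardware from the shadow after a reset instead of falling back to defaults. A minimal kernel-style sketch of that shadow-then-replay idea, with simplified stand-in types and a hypothetical hw_write_rss() standing in for the real command-queue calls:

	/* Illustrative only; the struct and hw_write_rss() are stand-ins,
	 * not the real hclge definitions.
	 */
	struct rss_shadow {
		u8 hash_key[40];
		int algo;
	};

	/* Set path: touch the shadow only once hardware accepted the value. */
	static int rss_set_key(struct rss_shadow *s, const u8 *key, int algo)
	{
		int ret = hw_write_rss(key, algo);	/* hypothetical hw call */

		if (ret)
			return ret;	/* hw rejected: shadow stays intact */

		memcpy(s->hash_key, key, sizeof(s->hash_key));
		s->algo = algo;		/* shadow now mirrors hardware */
		return 0;
	}

	/* Reset path: replay the shadow so user configuration survives. */
	static int rss_init_hw(const struct rss_shadow *s)
	{
		return hw_write_rss(s->hash_key, s->algo);
	}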
@@ -3533,18 +3541,13 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
 	}

 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
-	if (ret) {
+	if (ret)
 		dev_err(&handle->pdev->dev,
 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
 			vector_id,
 			ret);
-		return ret;
-	}

-	/* Free this MSIX or MSI vector */
-	hclge_free_vector(hdev, vector_id);
-
-	return 0;
+	return ret;
 }

 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
@@ -5398,6 +5401,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}

+	hclge_rss_init_cfg(hdev);
 	ret = hclge_rss_init_hw(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -5502,9 +5506,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}

-	ret = hclge_tm_schd_init(hdev);
+	ret = hclge_tm_init_hw(hdev);
 	if (ret) {
-		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
+		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
 		return ret;
 	}

@@ -6003,6 +6007,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.map_ring_to_vector = hclge_map_ring_to_vector,
 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
 	.get_vector = hclge_get_vector,
+	.put_vector = hclge_put_vector,
 	.set_promisc_mode = hclge_set_promisc_mode,
 	.set_loopback = hclge_set_loopback,
 	.start = hclge_ae_start,
@@ -573,12 +573,27 @@ struct hclge_rx_vtag_cfg {
 	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
 };

+struct hclge_rss_tuple_cfg {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_sctp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_sctp_en;
+	u8 ipv6_fragment_en;
+};
+
 struct hclge_vport {
 	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

 	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
 	/* User configured lookup table entries */
 	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+	int rss_algo;		/* User configured hash algorithm */
+	/* User configured rss tuple sets */
+	struct hclge_rss_tuple_cfg rss_tuple_sets;
+
+	u16 alloc_rss_size;

 	u16 qs_offset;
@@ -627,6 +642,7 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,

 int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
+void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

 void hclge_mbx_handler(struct hclge_dev *hdev);
 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
@@ -627,13 +627,18 @@ static int hclgevf_unmap_ring_from_vector(
 	}

 	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
-	if (ret) {
+	if (ret)
 		dev_err(&handle->pdev->dev,
 			"Unmap ring from vector fail. vector=%d, ret =%d\n",
 			vector_id,
 			ret);
-		return ret;
-	}

 	return ret;
 }

+static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	hclgevf_free_vector(hdev, vector);
+
+	return 0;
+}
@@ -1466,6 +1471,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
 	.get_vector = hclgevf_get_vector,
+	.put_vector = hclgevf_put_vector,
 	.reset_queue = hclgevf_reset_tqp,
 	.set_promisc_mode = hclgevf_set_promisc_mode,
 	.get_mac_addr = hclgevf_get_mac_addr,