Merge branch 'ibmvnic-Fix-driver-reset-and-DMA-bugs'
Thomas Falcon says:

====================
ibmvnic: Fix driver reset and DMA bugs

This patch series introduces some fixes to the driver reset routines and a patch that fixes mistakes caught by the kernel DMA debugger. The reset fixes include a fix to reset TX queue counters properly after a reset as well as updates to driver reset error-handling code. It also provides updates to the reset handling routine for redundant backing VF failover and partition migration cases.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
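As a quick orientation before the hunks, the toy model below condenses the deferred-failover flow this series adds: a failover event only sets a pending flag, open/close just record the desired device state while the flag is set, and the partner re-initialization clears the flag and schedules a FAILOVER reset. This is a simplified, self-contained sketch, not the driver source; all helper names (adapter_open, on_failover_event, on_partner_init, schedule_reset) are stand-ins invented here, and locking and error handling are omitted.

/* Toy model of the deferred VF failover handling; mirrors the driver
 * logic in shape only. Field and state names follow the driver.
 */
#include <stdbool.h>
#include <stdio.h>

enum vnic_state { VNIC_CLOSED, VNIC_OPEN };
enum reset_reason { RESET_FAILOVER, RESET_MOBILITY };

struct adapter {
	enum vnic_state state;
	bool failover_pending;
};

static void schedule_reset(struct adapter *a, enum reset_reason r)
{
	printf("scheduling reset, reason=%d\n", r);
}

static int adapter_open(struct adapter *a)
{
	/* Defer to the reset worker while a failover is pending. */
	if (a->failover_pending) {
		a->state = VNIC_OPEN;
		return 0;
	}
	/* ...normal open path would run here... */
	a->state = VNIC_OPEN;
	return 0;
}

static void on_failover_event(struct adapter *a)
{
	/* Only mark the failover as pending; no immediate reset. */
	a->failover_pending = true;
}

static void on_partner_init(struct adapter *a)
{
	/* Partner came back: clear the flag and schedule the reset. */
	a->failover_pending = false;
	schedule_reset(a, RESET_FAILOVER);
}

int main(void)
{
	struct adapter a = { VNIC_CLOSED, false };

	on_failover_event(&a);
	adapter_open(&a);   /* just records VNIC_OPEN */
	on_partner_init(&a); /* schedules the failover reset */
	return 0;
}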
@@ -118,6 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int init_crq_queue(struct ibmvnic_adapter *adapter);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -320,18 +321,16 @@ failure:
 	dev_info(dev, "replenish pools failure\n");
 	pool->free_map[pool->next_free] = index;
 	pool->rx_buff[index].skb = NULL;
-	if (!dma_mapping_error(dev, dma_addr))
-		dma_unmap_single(dev, dma_addr, pool->buff_size,
-				 DMA_FROM_DEVICE);
 
 	dev_kfree_skb_any(skb);
 	adapter->replenish_add_buff_failure++;
 	atomic_add(buffers_added, &pool->available);
 
-	if (lpar_rc == H_CLOSED) {
+	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 		/* Disable buffer pool replenishment and report carrier off if
-		 * queue is closed. Firmware guarantees that a signal will
-		 * be sent to the driver, triggering a reset.
+		 * queue is closed or pending failover.
+		 * Firmware guarantees that a signal will be sent to the
+		 * driver, triggering a reset.
 		 */
 		deactivate_rx_pools(adapter);
 		netif_carrier_off(adapter->netdev);
@@ -1071,6 +1070,14 @@ static int ibmvnic_open(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int rc;
 
+	/* If device failover is pending, just set device state and return.
+	 * Device operation will be handled by reset routine.
+	 */
+	if (adapter->failover_pending) {
+		adapter->state = VNIC_OPEN;
+		return 0;
+	}
+
 	mutex_lock(&adapter->reset_lock);
 
 	if (adapter->state != VNIC_CLOSED) {
@@ -1218,7 +1225,6 @@ static int __ibmvnic_close(struct net_device *netdev)
 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
 	if (rc)
 		return rc;
-	ibmvnic_cleanup(netdev);
 	adapter->state = VNIC_CLOSED;
 	return 0;
 }
@@ -1228,8 +1234,17 @@ static int ibmvnic_close(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int rc;
 
+	/* If device failover is pending, just set device state and return.
+	 * Device operation will be handled by reset routine.
+	 */
+	if (adapter->failover_pending) {
+		adapter->state = VNIC_CLOSED;
+		return 0;
+	}
+
 	mutex_lock(&adapter->reset_lock);
 	rc = __ibmvnic_close(netdev);
+	ibmvnic_cleanup(netdev);
 	mutex_unlock(&adapter->reset_lock);
 
 	return rc;
@@ -1562,8 +1577,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		dev_kfree_skb_any(skb);
 		tx_buff->skb = NULL;
 
-		if (lpar_rc == H_CLOSED) {
-			/* Disable TX and report carrier off if queue is closed.
+		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+			/* Disable TX and report carrier off if queue is closed
+			 * or pending failover.
 			 * Firmware guarantees that a signal will be sent to the
 			 * driver, triggering a reset or some other action.
 			 */
@@ -1711,14 +1727,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	old_num_rx_queues = adapter->req_rx_queues;
 	old_num_tx_queues = adapter->req_tx_queues;
 
-	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
-		rc = ibmvnic_reenable_crq_queue(adapter);
-		if (rc)
-			return 0;
-		ibmvnic_cleanup(netdev);
-	} else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
-		ibmvnic_cleanup(netdev);
-	} else {
+	ibmvnic_cleanup(netdev);
+
+	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
+	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
 		rc = __ibmvnic_close(netdev);
 		if (rc)
 			return rc;
@@ -1737,6 +1749,23 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	 */
 	adapter->state = VNIC_PROBED;
 
+	if (adapter->wait_for_reset) {
+		rc = init_crq_queue(adapter);
+	} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+		rc = ibmvnic_reenable_crq_queue(adapter);
+		release_sub_crqs(adapter, 1);
+	} else {
+		rc = ibmvnic_reset_crq(adapter);
+		if (!rc)
+			rc = vio_enable_interrupts(adapter->vdev);
+	}
+
+	if (rc) {
+		netdev_err(adapter->netdev,
+			   "Couldn't initialize crq. rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = ibmvnic_init(adapter);
 	if (rc)
 		return IBMVNIC_INIT_FAILED;
@@ -1878,23 +1907,26 @@ static void __ibmvnic_reset(struct work_struct *work)
 	mutex_unlock(&adapter->reset_lock);
 }
 
-static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
+static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 			 enum ibmvnic_reset_reason reason)
 {
 	struct ibmvnic_rwi *rwi, *tmp;
 	struct net_device *netdev = adapter->netdev;
 	struct list_head *entry;
+	int ret;
 
 	if (adapter->state == VNIC_REMOVING ||
-	    adapter->state == VNIC_REMOVED) {
-		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
-		return;
+	    adapter->state == VNIC_REMOVED ||
+	    adapter->failover_pending) {
+		ret = EBUSY;
+		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
+		goto err;
 	}
 
 	if (adapter->state == VNIC_PROBING) {
 		netdev_warn(netdev, "Adapter reset during probe\n");
-		adapter->init_done_rc = EAGAIN;
-		return;
+		ret = adapter->init_done_rc = EAGAIN;
+		goto err;
 	}
 
 	mutex_lock(&adapter->rwi_lock);
@@ -1904,7 +1936,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
 		if (tmp->reset_reason == reason) {
 			netdev_dbg(netdev, "Skipping matching reset\n");
 			mutex_unlock(&adapter->rwi_lock);
-			return;
+			ret = EBUSY;
+			goto err;
 		}
 	}
 
@@ -1912,7 +1945,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
 	if (!rwi) {
 		mutex_unlock(&adapter->rwi_lock);
 		ibmvnic_close(netdev);
-		return;
+		ret = ENOMEM;
+		goto err;
 	}
 
 	rwi->reset_reason = reason;
@@ -1921,6 +1955,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
 
 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
 	schedule_work(&adapter->ibmvnic_reset);
+
+	return 0;
+err:
+	if (adapter->wait_for_reset)
+		adapter->wait_for_reset = false;
+	return -ret;
 }
 
 static void ibmvnic_tx_timeout(struct net_device *dev)
@@ -2055,6 +2095,8 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
 
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
+	int rc, ret;
+
 	adapter->fallback.mtu = adapter->req_mtu;
 	adapter->fallback.rx_queues = adapter->req_rx_queues;
 	adapter->fallback.tx_queues = adapter->req_tx_queues;
@@ -2062,11 +2104,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
 
 	init_completion(&adapter->reset_done);
-	ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
 	adapter->wait_for_reset = true;
+	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+	if (rc)
+		return rc;
 	wait_for_completion(&adapter->reset_done);
 
+	ret = 0;
 	if (adapter->reset_done_rc) {
+		ret = -EIO;
 		adapter->desired.mtu = adapter->fallback.mtu;
 		adapter->desired.rx_queues = adapter->fallback.rx_queues;
 		adapter->desired.tx_queues = adapter->fallback.tx_queues;
@@ -2074,12 +2120,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
 
 		init_completion(&adapter->reset_done);
-		ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+		adapter->wait_for_reset = true;
+		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+		if (rc)
+			return ret;
 		wait_for_completion(&adapter->reset_done);
 	}
 	adapter->wait_for_reset = false;
 
-	return adapter->reset_done_rc;
+	return ret;
 }
 
 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
@@ -2364,6 +2413,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	}
 
 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+	atomic_set(&scrq->used, 0);
 	scrq->cur = 0;
 
 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
@@ -2574,7 +2624,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 	union sub_crq *next;
 	int index;
 	int i, j;
-	u8 first;
+	u8 *first;
 
 restart_loop:
 	while (pending_scrq(adapter, scrq)) {
@@ -2605,11 +2655,12 @@ restart_loop:
 				txbuff->data_dma[j] = 0;
 			}
 			/* if sub_crq was sent indirectly */
-			first = txbuff->indir_arr[0].generic.first;
-			if (first == IBMVNIC_CRQ_CMD) {
+			first = &txbuff->indir_arr[0].generic.first;
+			if (*first == IBMVNIC_CRQ_CMD) {
 				dma_unmap_single(dev, txbuff->indir_dma,
 						 sizeof(txbuff->indir_arr),
 						 DMA_TO_DEVICE);
+				*first = 0;
 			}
 
 			if (txbuff->last_frag) {
@@ -3882,9 +3933,9 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 	int i;
 
 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
-			 DMA_BIDIRECTIONAL);
+			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
-			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
 
 	/* If the number of queues requested can't be allocated by the
 	 * server, the login response will return with code 1. We will need
@@ -4144,7 +4195,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		case IBMVNIC_CRQ_INIT:
 			dev_info(dev, "Partner initialized\n");
 			adapter->from_passive_init = true;
+			adapter->failover_pending = false;
 			complete(&adapter->init_done);
+			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
 			break;
 		case IBMVNIC_CRQ_INIT_COMPLETE:
 			dev_info(dev, "Partner initialization complete\n");
@@ -4161,7 +4214,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
 			dev_info(dev, "Backing device failover detected\n");
-			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+			adapter->failover_pending = true;
 		} else {
 			/* The adapter lost the connection */
 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
@@ -4461,19 +4514,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 	u64 old_num_rx_queues, old_num_tx_queues;
 	int rc;
 
-	if (adapter->resetting && !adapter->wait_for_reset) {
-		rc = ibmvnic_reset_crq(adapter);
-		if (!rc)
-			rc = vio_enable_interrupts(adapter->vdev);
-	} else {
-		rc = init_crq_queue(adapter);
-	}
-
-	if (rc) {
-		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
-		return rc;
-	}
-
 	adapter->from_passive_init = false;
 
 	old_num_rx_queues = adapter->req_rx_queues;
@@ -4498,7 +4538,8 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 		return -1;
 	}
 
-	if (adapter->resetting && !adapter->wait_for_reset) {
+	if (adapter->resetting && !adapter->wait_for_reset &&
+	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
 		if (adapter->req_rx_queues != old_num_rx_queues ||
 		    adapter->req_tx_queues != old_num_tx_queues) {
 			release_sub_crqs(adapter, 0);
@@ -4586,6 +4627,13 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	adapter->mac_change_pending = false;
 
 	do {
+		rc = init_crq_queue(adapter);
+		if (rc) {
+			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
+				rc);
+			goto ibmvnic_init_fail;
+		}
+
 		rc = ibmvnic_init(adapter);
 		if (rc && rc != EAGAIN)
 			goto ibmvnic_init_fail;
@@ -1108,6 +1108,7 @@ struct ibmvnic_adapter {
 	bool napi_enabled, from_passive_init;
 
 	bool mac_change_pending;
+	bool failover_pending;
 
 	struct ibmvnic_tunables desired;
 	struct ibmvnic_tunables fallback;