diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1fa84790041b..218a446c4c27 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -312,6 +312,9 @@ struct receive_queue {
 	/* Is dynamic interrupt moderation enabled? */
 	bool dim_enabled;
 
+	/* Used to protect dim_enabled and intr_coal */
+	struct mutex dim_lock;
+
 	/* Dynamic Interrupt Moderation */
 	struct dim dim;
 
@@ -368,16 +371,6 @@ struct virtio_net_ctrl_rss {
 struct control_buf {
 	struct virtio_net_ctrl_hdr hdr;
 	virtio_net_ctrl_ack status;
-	struct virtio_net_ctrl_mq mq;
-	u8 promisc;
-	u8 allmulti;
-	__virtio16 vid;
-	__virtio64 offloads;
-	struct virtio_net_ctrl_rss rss;
-	struct virtio_net_ctrl_coal_tx coal_tx;
-	struct virtio_net_ctrl_coal_rx coal_rx;
-	struct virtio_net_ctrl_coal_vq coal_vq;
-	struct virtio_net_stats_capabilities stats_cap;
 };
 
 struct virtnet_info {
@@ -416,10 +409,14 @@ struct virtnet_info {
 	u16 rss_indir_table_size;
 	u32 rss_hash_types_supported;
 	u32 rss_hash_types_saved;
+	struct virtio_net_ctrl_rss rss;
 
 	/* Has control virtqueue */
 	bool has_cvq;
 
+	/* Lock to protect the control VQ */
+	struct mutex cvq_lock;
+
 	/* Host can handle any s/g split between our header and packet data */
 	bool any_header_sg;
 
@@ -2371,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+		/* Intentionally not taking dim_lock here. This may result in a
+		 * spurious net_dim call. But if that happens virtnet_rx_dim_work
+		 * will not act on the scheduled work.
+		 */
 		if (napi_complete && rq->dim_enabled)
 			virtnet_rx_dim_update(vi, rq);
 	}
@@ -2684,6 +2685,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
 	/* Caller should know better */
 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
+	mutex_lock(&vi->cvq_lock);
 	vi->ctrl->status = ~0;
 	vi->ctrl->hdr.class = class;
 	vi->ctrl->hdr.cmd = cmd;
@@ -2706,11 +2708,12 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
 	if (ret < 0) {
 		dev_warn(&vi->vdev->dev,
 			 "Failed to add sgs for command vq: %d\n.", ret);
+		mutex_unlock(&vi->cvq_lock);
 		return false;
 	}
 
 	if (unlikely(!virtqueue_kick(vi->cvq)))
-		return vi->ctrl->status == VIRTIO_NET_OK;
+		goto unlock;
 
 	/* Spin for a response, the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
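The new cvq_lock turns the whole control-VQ send path into one critical section: vi->ctrl->status and the header fields remain shared between callers, so the mutex must be held from filling the header until the reply has been consumed, with every error path funneled through an unlock. A minimal userspace sketch of that discipline, assuming a pthread mutex and invented helper names (device_roundtrip, send_command) that are not the kernel API; build with cc -pthread:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of vi->ctrl: one reply buffer shared by all senders. */
static struct { int status, class, cmd; } ctrl;
static pthread_mutex_t cvq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for virtqueue_kick() plus polling for the device reply. */
static bool device_roundtrip(void)
{
	ctrl.status = 0;	/* device writes VIRTIO_NET_OK (0) */
	return true;
}

static bool send_command(int class, int cmd)
{
	bool ok;

	pthread_mutex_lock(&cvq_lock);
	ctrl.status = ~0;	/* poisoned until the device replies */
	ctrl.class = class;
	ctrl.cmd = cmd;

	if (!device_roundtrip())
		goto unlock;	/* same early-out shape as the patch */
unlock:
	ok = ctrl.status == 0;	/* read the reply while still holding the lock */
	pthread_mutex_unlock(&cvq_lock);
	return ok;
}

int main(void)
{
	printf("ok=%d\n", send_command(4, 0));
	return 0;
}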
@@ -2721,6 +2724,8 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
 		cpu_relax();
 	}
 
+unlock:
+	mutex_unlock(&vi->cvq_lock);
 	return vi->ctrl->status == VIRTIO_NET_OK;
 }
 
@@ -2819,23 +2824,26 @@ static void virtnet_stats(struct net_device *dev,
 
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
-	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
-	rtnl_unlock();
 }
 
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
+	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
 	struct scatterlist sg;
 	struct net_device *dev = vi->dev;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
-	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+	if (!mq)
+		return -ENOMEM;
+
+	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+	sg_init_one(&sg, mq, sizeof(*mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2852,18 +2860,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	return 0;
 }
 
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
-	int err;
-
-	rtnl_lock();
-	err = _virtnet_set_queues(vi, queue_pairs);
-	rtnl_unlock();
-	return err;
-}
-
 static int virtnet_close(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
@@ -2888,6 +2887,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
 	struct scatterlist sg[2];
 	struct virtio_net_ctrl_mac *mac_data;
 	struct netdev_hw_addr *ha;
+	u8 *promisc_allmulti __free(kfree) = NULL;
 	int uc_count;
 	int mc_count;
 	void *buf;
 	int i;
@@ -2899,22 +2899,28 @@ static void virtnet_rx_mode_work(struct work_struct *work)
 
 	rtnl_lock();
 
-	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
-	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+	promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
+	if (!promisc_allmulti) {
+		dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+		rtnl_unlock();
+		return;
+	}
 
-	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+	*promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-			 vi->ctrl->promisc ? "en" : "dis");
+			 *promisc_allmulti ? "en" : "dis");
 
-	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+	*promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-			 vi->ctrl->allmulti ? "en" : "dis");
+			 *promisc_allmulti ? "en" : "dis");
 
 	netif_addr_lock_bh(dev);
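The `mq __free(kfree)` declaration above uses the kernel's scope-based cleanup helpers (linux/cleanup.h), which are built on the compiler's cleanup attribute: the buffer is freed automatically on every return path, including the -ENOMEM one. A self-contained userspace rendition of the idiom, where auto_free, free_ptr and set_queues are hypothetical names of my own:

#include <stdio.h>
#include <stdlib.h>

/* Userspace rendition of __free(kfree): the cleanup attribute (a
 * GCC/Clang extension that linux/cleanup.h builds on) runs free_ptr()
 * when the variable leaves scope, on every return path.
 */
static void free_ptr(void *p)
{
	free(*(void **)p);
}
#define auto_free __attribute__((cleanup(free_ptr)))

static int set_queues(unsigned short queue_pairs)
{
	auto_free unsigned short *mq = NULL;

	mq = calloc(1, sizeof(*mq));
	if (!mq)
		return -1;	/* stands in for -ENOMEM */

	*mq = queue_pairs;
	printf("would send MQ_VQ_PAIRS_SET for %u pairs\n", (unsigned)*mq);
	return 0;		/* mq freed automatically here */
}

int main(void)
{
	return set_queues(4) ? 1 : 0;
}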
"en" : "dis"); netif_addr_lock_bh(dev); @@ -2975,10 +2980,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); + __virtio16 *_vid __free(kfree) = NULL; struct scatterlist sg; - vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); - sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); + _vid = kzalloc(sizeof(*_vid), GFP_KERNEL); + if (!_vid) + return -ENOMEM; + + *_vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, _vid, sizeof(*_vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) @@ -2990,10 +3000,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); + __virtio16 *_vid __free(kfree) = NULL; struct scatterlist sg; - vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); - sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); + _vid = kzalloc(sizeof(*_vid), GFP_KERNEL); + if (!_vid) + return -ENOMEM; + + *_vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, _vid, sizeof(*_vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) @@ -3106,12 +3121,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi) static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, u16 vqn, u32 max_usecs, u32 max_packets) { + struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL; struct scatterlist sgs; - vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); - vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); - vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); - sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); + coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL); + if (!coal_vq) + return -ENOMEM; + + coal_vq->vqn = cpu_to_le16(vqn); + coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); + coal_vq->coal.max_packets = cpu_to_le32(max_packets); + sg_init_one(&sgs, coal_vq, sizeof(*coal_vq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, @@ -3222,9 +3242,11 @@ static int virtnet_set_ringparam(struct net_device *dev, return err; /* The reason is same as the transmit virtqueue reset */ + mutex_lock(&vi->rq[i].dim_lock); err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, vi->intr_coal_rx.max_usecs, vi->intr_coal_rx.max_packets); + mutex_unlock(&vi->rq[i].dim_lock); if (err) return err; } @@ -3243,25 +3265,29 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi) sg_init_table(sgs, 4); sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); - sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); + sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); - sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); - sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); + sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1); + sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); - sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); + sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size); sg_buf_size = vi->rss_key_size; - sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); + sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, vi->has_rss ? 
@@ -3243,25 +3265,27 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 	sg_init_table(sgs, 4);
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
-	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+	sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
 
-	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
-	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+	sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+	sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
 			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
-	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+	sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
 
 	sg_buf_size = vi->rss_key_size;
-	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+	sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
-				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
-		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
-		return false;
-	}
+				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+		goto err;
 
 	return true;
+
+err:
+	dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+	return false;
 }
 
 static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3269,21 +3295,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
 	u32 indir_val = 0;
 	int i = 0;
 
-	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+	vi->rss.hash_types = vi->rss_hash_types_supported;
 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
-	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+	vi->rss.indirection_table_mask = vi->rss_indir_table_size
 						? vi->rss_indir_table_size - 1 : 0;
-	vi->ctrl->rss.unclassified_queue = 0;
+	vi->rss.unclassified_queue = 0;
 
 	for (; i < vi->rss_indir_table_size; ++i) {
 		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
-		vi->ctrl->rss.indirection_table[i] = indir_val;
+		vi->rss.indirection_table[i] = indir_val;
 	}
 
-	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
-	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+	vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+	vi->rss.hash_key_length = vi->rss_key_size;
 
-	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+	netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
 }
 
 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3394,7 +3420,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
 
 	if (new_hashtypes != vi->rss_hash_types_saved) {
 		vi->rss_hash_types_saved = new_hashtypes;
-		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+		vi->rss.hash_types = vi->rss_hash_types_saved;
 		if (vi->dev->features & NETIF_F_RXHASH)
 			return virtnet_commit_rss_command(vi);
 	}
@@ -3439,7 +3465,7 @@ static int virtnet_set_channels(struct net_device *dev,
 		return -EINVAL;
 
 	cpus_read_lock();
-	err = _virtnet_set_queues(vi, queue_pairs);
+	err = virtnet_set_queues(vi, queue_pairs);
 	if (err) {
 		cpus_read_unlock();
 		goto err;
@@ -4193,12 +4219,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 					  struct ethtool_coalesce *ec)
 {
+	struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
 	struct scatterlist sgs_tx;
 	int i;
 
-	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
-	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+	coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+	if (!coal_tx)
+		return -ENOMEM;
+
+	coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+	coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+	sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -4218,8 +4249,10 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 					  struct ethtool_coalesce *ec)
 {
+	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
 	struct scatterlist sgs_rx;
+	int ret = 0;
 	int i;
 
 	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
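virtnet_commit_rss_command() describes vi->rss to the device as four scatterlist spans computed with offsetof(), so the variable-length indirection table and key can be sized at runtime. A runnable sketch of the span arithmetic; the mock struct ctrl_rss only mirrors the shape of the real layout, and the field sizes are illustrative assumptions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mock of struct virtio_net_ctrl_rss: fixed header, variable table,
 * fixed tail, variable key. The driver points four sg entries at the
 * four regions instead of copying them into one flat buffer.
 */
struct ctrl_rss {
	uint32_t hash_types;
	uint16_t indirection_table_mask;
	uint16_t unclassified_queue;
	uint16_t indirection_table[128];
	uint16_t max_tx_vq;
	uint8_t hash_key_length;
	uint8_t key[40];
};

int main(void)
{
	size_t span0 = offsetof(struct ctrl_rss, indirection_table);
	size_t span1 = sizeof(uint16_t) * 128;	/* mask + 1 entries */
	size_t span2 = offsetof(struct ctrl_rss, key)
			- offsetof(struct ctrl_rss, max_tx_vq);
	size_t span3 = 40;			/* rss_key_size */

	printf("sg[0]=%zu sg[1]=%zu sg[2]=%zu sg[3]=%zu bytes\n",
	       span0, span1, span2, span3);
	return 0;
}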
@@ -4229,11 +4262,21 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
 		return -EINVAL;
 
+	/* Acquire all queues dim_locks */
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		mutex_lock(&vi->rq[i].dim_lock);
+
 	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = true;
 		for (i = 0; i < vi->max_queue_pairs; i++)
 			vi->rq[i].dim_enabled = true;
-		return 0;
+		goto unlock;
+	}
+
+	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+	if (!coal_rx) {
+		ret = -ENOMEM;
+		goto unlock;
 	}
 
 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
@@ -4246,14 +4289,16 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	 * we need apply the global new params even if they
 	 * are not updated.
 	 */
-	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
-	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+	coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+	coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+	sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-				  &sgs_rx))
-		return -EINVAL;
+				  &sgs_rx)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
@@ -4261,8 +4306,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
 	}
+unlock:
+	for (i = vi->max_queue_pairs - 1; i >= 0; i--)
+		mutex_unlock(&vi->rq[i].dim_lock);
 
-	return 0;
+	return ret;
 }
 
 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -4286,19 +4334,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
 					     u16 queue)
 {
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
-	bool cur_rx_dim = vi->rq[queue].dim_enabled;
 	u32 max_usecs, max_packets;
+	bool cur_rx_dim;
 	int err;
 
+	mutex_lock(&vi->rq[queue].dim_lock);
+	cur_rx_dim = vi->rq[queue].dim_enabled;
 	max_usecs = vi->rq[queue].intr_coal.max_usecs;
 	max_packets = vi->rq[queue].intr_coal.max_packets;
 
 	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
-			       ec->rx_max_coalesced_frames != max_packets))
+			       ec->rx_max_coalesced_frames != max_packets)) {
+		mutex_unlock(&vi->rq[queue].dim_lock);
 		return -EINVAL;
+	}
 
 	if (rx_ctrl_dim_on && !cur_rx_dim) {
 		vi->rq[queue].dim_enabled = true;
+		mutex_unlock(&vi->rq[queue].dim_lock);
 		return 0;
 	}
 
@@ -4311,10 +4364,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
 	err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
 					       ec->rx_coalesce_usecs,
 					       ec->rx_max_coalesced_frames);
-	if (err)
-		return err;
-
-	return 0;
+	mutex_unlock(&vi->rq[queue].dim_lock);
+	return err;
 }
 
 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
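virtnet_send_rx_notf_coal_cmds() takes every queue's dim_lock in ascending index order and releases them in reverse; as long as all acquirers honor the same order, no two paths can cross-block and deadlock. A compact userspace demonstration of that ordering rule, with NQUEUES and update_all_queues made up for illustration; build with cc -pthread:

#include <pthread.h>
#include <stdio.h>

#define NQUEUES 4

/* Deadlock avoidance: lock in ascending index order, unlock in
 * reverse. Any two threads doing the same can never hold lock j
 * while waiting on lock i < j held by the other.
 */
static pthread_mutex_t dim_lock[NQUEUES];

static void update_all_queues(void)
{
	int i;

	for (i = 0; i < NQUEUES; i++)
		pthread_mutex_lock(&dim_lock[i]);

	/* ... apply global coalescing parameters to every queue ... */

	for (i = NQUEUES - 1; i >= 0; i--)
		pthread_mutex_unlock(&dim_lock[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < NQUEUES; i++)
		pthread_mutex_init(&dim_lock[i], NULL);
	update_all_queues();
	puts("updated");
	return 0;
}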
@@ -4344,39 +4395,27 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct net_device *dev = vi->dev;
 	struct dim_cq_moder update_moder;
-	int i, qnum, err;
+	int qnum, err;
 
-	if (!rtnl_trylock())
-		return;
+	qnum = rq - vi->rq;
 
-	/* Each rxq's work is queued by "net_dim()->schedule_work()"
-	 * in response to NAPI traffic changes. Note that dim->profile_ix
-	 * for each rxq is updated prior to the queuing action.
-	 * So we only need to traverse and update profiles for all rxqs
-	 * in the work which is holding rtnl_lock.
-	 */
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		rq = &vi->rq[i];
-		dim = &rq->dim;
-		qnum = rq - vi->rq;
+	mutex_lock(&rq->dim_lock);
+	if (!rq->dim_enabled)
+		goto out;
 
-		if (!rq->dim_enabled)
-			continue;
-
-		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
-		if (update_moder.usec != rq->intr_coal.max_usecs ||
-		    update_moder.pkts != rq->intr_coal.max_packets) {
-			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
-							       update_moder.usec,
-							       update_moder.pkts);
-			if (err)
-				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
-					 dev->name, qnum);
-			dim->state = DIM_START_MEASURE;
-		}
+	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+	if (update_moder.usec != rq->intr_coal.max_usecs ||
+	    update_moder.pkts != rq->intr_coal.max_packets) {
+		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+						       update_moder.usec,
+						       update_moder.pkts);
+		if (err)
+			pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+				 dev->name, qnum);
+		dim->state = DIM_START_MEASURE;
 	}
-
-	rtnl_unlock();
+out:
+	mutex_unlock(&rq->dim_lock);
 }
 
 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -4514,11 +4553,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
 		return -EINVAL;
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+		mutex_lock(&vi->rq[queue].dim_lock);
 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
 		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
 		ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
+		mutex_unlock(&vi->rq[queue].dim_lock);
 	} else {
 		ec->rx_max_coalesced_frames = 1;
 
@@ -4574,11 +4615,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
 
 	if (rxfh->indir) {
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+			rxfh->indir[i] = vi->rss.indirection_table[i];
 	}
 
 	if (rxfh->key)
-		memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+		memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
 
 	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
@@ -4602,7 +4643,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
 			return -EOPNOTSUPP;
 
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+			vi->rss.indirection_table[i] = rxfh->indir[i];
 
 		update = true;
 	}
@@ -4614,7 +4655,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
 		if (!vi->has_rss && !vi->has_rss_hash_report)
 			return -EOPNOTSUPP;
 
-		memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+		memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
 
 		update = true;
 	}
@@ -4823,10 +4864,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 {
+	__virtio64 *_offloads __free(kfree) = NULL;
 	struct scatterlist sg;
 
-	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
-	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+	_offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+	if (!_offloads)
+		return -ENOMEM;
+
+	*_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+	sg_init_one(&sg, _offloads, sizeof(*_offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
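The reworked virtnet_rx_dim_work() touches only its own queue and rechecks dim_enabled under dim_lock, which is what makes the intentionally lock-free test in virtnet_poll() safe: a work item scheduled just before DIM was switched off simply falls through. A userspace sketch of that shape, where struct rxq and dim_work are illustrative stand-ins rather than driver types; build with cc -pthread:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* The work item owns one queue and bails out early if DIM was
 * disabled between scheduling and execution.
 */
struct rxq {
	pthread_mutex_t dim_lock;
	bool dim_enabled;
	unsigned int usecs, pkts;	/* stand-ins for intr_coal */
};

static void dim_work(struct rxq *rq, unsigned int new_usecs, unsigned int new_pkts)
{
	pthread_mutex_lock(&rq->dim_lock);
	if (!rq->dim_enabled)
		goto out;		/* spurious schedule: no-op */

	if (new_usecs != rq->usecs || new_pkts != rq->pkts) {
		rq->usecs = new_usecs;	/* the driver sends a ctrl command here */
		rq->pkts = new_pkts;
	}
out:
	pthread_mutex_unlock(&rq->dim_lock);
}

int main(void)
{
	struct rxq rq = { .dim_lock = PTHREAD_MUTEX_INITIALIZER,
			  .dim_enabled = true, .usecs = 8, .pkts = 64 };

	dim_work(&rq, 16, 32);
	printf("usecs=%u pkts=%u\n", rq.usecs, rq.pkts);
	return 0;
}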
@@ -4926,7 +4973,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		synchronize_net();
 	}
 
-	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
 	if (err)
 		goto err;
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -5028,9 +5075,9 @@ static int virtnet_set_features(struct net_device *dev,
 
 	if ((dev->features ^ features) & NETIF_F_RXHASH) {
 		if (features & NETIF_F_RXHASH)
-			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+			vi->rss.hash_types = vi->rss_hash_types_saved;
 		else
-			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+			vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
 
 		if (!virtnet_commit_rss_command(vi))
 			return -EINVAL;
@@ -5342,6 +5389,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 
 		u64_stats_init(&vi->rq[i].stats.syncp);
 		u64_stats_init(&vi->sq[i].stats.syncp);
+		mutex_init(&vi->rq[i].dim_lock);
 	}
 
 	return 0;
@@ -5698,6 +5746,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
+	mutex_init(&vi->cvq_lock);
+
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
 		mtu = virtio_cread16(vdev,
 				     offsetof(struct virtio_net_config,
@@ -5789,7 +5839,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	_virtnet_set_queues(vi, vi->curr_queue_pairs);
+	virtnet_set_queues(vi, vi->curr_queue_pairs);
 
 	/* a random MAC address has been assigned, notify the device.
 	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
@@ -5810,10 +5860,18 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
+		struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
 		struct scatterlist sg;
 		__le64 v;
 
-		sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
+		stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
+		if (!stats_cap) {
+			rtnl_unlock();
+			err = -ENOMEM;
+			goto free_unregister_netdev;
+		}
+
+		sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
 
 		if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
 						VIRTIO_NET_CTRL_STATS_QUERY,
@@ -5824,7 +5882,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 			goto free_unregister_netdev;
 		}
 
-		v = vi->ctrl->stats_cap.supported_stats_types[0];
+		v = stats_cap->supported_stats_types[0];
 		vi->device_stats_cap = le64_to_cpu(v);
 	}
 
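The probe path above reads supported_stats_types[0] as a little-endian 64-bit bitmap and stores the CPU-order value in vi->device_stats_cap via le64_to_cpu(). A portable userspace equivalent of that conversion plus a bit test; the bit position checked is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assemble a host-order u64 from little-endian bytes, regardless of
 * the host's own endianness (what le64_to_cpu() does on big-endian).
 */
static uint64_t le64_to_host(const uint8_t b[8])
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	const uint8_t raw[8] = { 0x05, 0, 0, 0, 0, 0, 0, 0 }; /* bits 0 and 2 */
	uint64_t cap = le64_to_host(raw);

	printf("stats cap bitmap: %#llx, bit 2 %s\n",
	       (unsigned long long)cap,
	       (cap & (1ULL << 2)) ? "supported" : "absent");
	return 0;
}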