author | David Vrabel <david.vrabel@citrix.com> | 2014-06-18 10:47:28 +0100 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-06-21 16:14:26 -0700 |
commit | ce58725fec6e609eee162e6af848bd57107b97af (patch) | |
tree | 0c2b20bc313a7e0b292904e2352b7ac72719044a /drivers/net | |
parent | 765418694bc99d91e71ede6d2889a6328da137fe (diff) | |
xen-netfront: recreate queues correctly when reconnecting
When reconnecting to the backend (after a resume/migration, for example),
a different number of queues may be required (since the guest may have
moved to a different host with different capabilities). During the
reconnection the old queues are torn down and new ones created.
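With this change, the reconnect path in talk_to_netback() takes the shape below (excerpted from the diff at the bottom of this page): any queues surviving from the previous connection are destroyed first, then a fresh set is created for the new backend.

```c
/* Excerpt from talk_to_netback() as modified by this patch. */
if (info->queues)
	xennet_destroy_queues(info);

err = xennet_create_queues(info, num_queues);
if (err < 0)
	goto destroy_ring;
```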
Introduce xennet_create_queues() and xennet_destroy_queues(), which fix
three bugs during the reconnection:
- The old info->queues was leaked.
- The old queues' napi instances were not deleted.
- The new queues' napi instances were left disabled (which meant no
packets could be received); see the condensed sketch after this list.
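Condensed from the two new helpers in the diff below (annotation comments added here, not present in the patch), the fixes map onto the code like this:

```c
/* From xennet_destroy_queues(): tear down each old queue's napi
 * instance before freeing the queue array.
 */
for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
	struct netfront_queue *queue = &info->queues[i];

	if (netif_running(info->netdev))
		napi_disable(&queue->napi);
	netif_napi_del(&queue->napi);	/* bug 2: napi instance now deleted */
}
kfree(info->queues);			/* bug 1: old array no longer leaked */
info->queues = NULL;

/* From xennet_create_queues(): register and, if the device is up,
 * enable napi for each newly created queue.
 */
netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
if (netif_running(info->netdev))
	napi_enable(&queue->napi);	/* bug 3: napi no longer left disabled */
```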
The xennet_destroy_queues() call is deferred until the reconnection,
rather than being made at disconnection time (in
xennet_disconnect_backend()), because napi_disable() might sleep.
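For reference, napi_disable() in kernels of this era was an inline in include/linux/netdevice.h (reproduced from memory, so treat it as a sketch rather than the exact source): it busy-waits with msleep() until any in-flight poll completes, so it may only be called where sleeping is allowed.

```c
/* napi_disable() circa v3.16 (include/linux/netdevice.h), shown for
 * reference; it may msleep() while waiting for an in-flight poll to
 * finish, hence it must not run in atomic context.
 */
static inline void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
```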
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/xen-netfront.c | 104 |
1 file changed, 72 insertions(+), 32 deletions(-)
```diff
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index daaf1e56e41e..2ccb4a02368b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1699,8 +1699,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
 		goto exit_free_tx;
 	}
 
-	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
 	return 0;
 
  exit_free_tx:
@@ -1791,6 +1789,70 @@ error:
 	return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+	unsigned int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		if (netif_running(info->netdev))
+			napi_disable(&queue->napi);
+		netif_napi_del(&queue->napi);
+	}
+
+	rtnl_unlock();
+
+	kfree(info->queues);
+	info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+				unsigned int num_queues)
+{
+	unsigned int i;
+	int ret;
+
+	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+			       GFP_KERNEL);
+	if (!info->queues)
+		return -ENOMEM;
+
+	rtnl_lock();
+
+	for (i = 0; i < num_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		queue->id = i;
+		queue->info = info;
+
+		ret = xennet_init_queue(queue);
+		if (ret < 0) {
+			dev_warn(&info->netdev->dev, "only created %d queues\n",
+				 num_queues);
+			num_queues = i;
+			break;
+		}
+
+		netif_napi_add(queue->info->netdev, &queue->napi,
+			       xennet_poll, 64);
+		if (netif_running(info->netdev))
+			napi_enable(&queue->napi);
+	}
+
+	netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+	rtnl_unlock();
+
+	if (num_queues == 0) {
+		dev_err(&info->netdev->dev, "no queues\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
 			   struct netfront_info *info)
@@ -1827,42 +1889,20 @@ static int talk_to_netback(struct xenbus_device *dev,
 		goto out;
 	}
 
-	/* Allocate array of queues */
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-	if (!info->queues) {
-		err = -ENOMEM;
-		goto out;
-	}
-	rtnl_lock();
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
-	rtnl_unlock();
+	if (info->queues)
+		xennet_destroy_queues(info);
+
+	err = xennet_create_queues(info, num_queues);
+	if (err < 0)
+		goto destroy_ring;
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
-		queue->id = i;
-		queue->info = info;
-		err = xennet_init_queue(queue);
-		if (err) {
-			/* xennet_init_queue() cleans up after itself on failure,
-			 * but we still have to clean up any previously initialised
-			 * queues. If i > 0, set num_queues to i, then goto
-			 * destroy_ring, which calls xennet_disconnect_backend()
-			 * to tidy up.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
 		err = setup_netfront(dev, queue, feature_split_evtchn);
 		if (err) {
-			/* As for xennet_init_queue(), setup_netfront() will tidy
-			 * up the current queue on error, but we need to clean up
+			/* setup_netfront() will tidy up the current
+			 * queue on error, but we need to clean up
 			 * those already allocated.
 			 */
 			if (i > 0) {
```