path: root/drivers/net/tun.c
author     Jason Wang <jasowang@redhat.com>        2013-01-23 03:59:12 +0000
committer  David S. Miller <davem@davemloft.net>   2013-01-23 13:47:06 -0500
commit     edfb6a148ce62e5e19354a1dcd9a34e00815c2a1 (patch)
tree       839be5ca970b47e5b5fa38c9df3e672732ef6987 /drivers/net/tun.c
parent     844e88f04410cc4e85615db519c1d44089333c4f (diff)
tuntap: reduce memory usage of queues
MAX_TAP_QUEUES (1024) queues are always allocated for a tuntap device unconditionally, even when userspace only asks for a single-queue device. This is unnecessary and forces a very high-order page allocation, which has a high probability of failing. Solve this by creating a single-queue net device when userspace only uses one queue, and by reducing MAX_TAP_QUEUES to DEFAULT_MAX_NUM_RSS_QUEUES, which is small enough to guarantee the allocation succeeds.

Reported-by: Dirk Hohndel <dirk@hohndel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
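To make the behavioural split concrete, here is a minimal userspace sketch of the two paths the patch distinguishes. The helper name tun_alloc and the error handling are illustrative only and not part of the patch; it assumes kernel headers that already define IFF_MULTI_QUEUE.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Illustrative helper: create a tap device, optionally multi-queue. */
static int tun_alloc(const char *name, int multi_queue)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	if (multi_queue)
		ifr.ifr_flags |= IFF_MULTI_QUEUE; /* only this path still needs MAX_TAP_QUEUES slots */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

Without IFF_MULTI_QUEUE the kernel now calls alloc_netdev_mqs() with a single queue; with it, additional queues are attached by repeating the same TUNSETIFF call on further /dev/net/tun descriptors carrying the same interface name.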
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r--  drivers/net/tun.c  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c81680dc10eb..8939d2117de2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,10 @@ struct tap_filter {
 	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
+ * the netdevice to be fit in one page. So we can make sure the success of
+ * memory allocation. TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
@@ -1583,6 +1582,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 	else {
 		char *name;
 		unsigned long flags = 0;
+		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+			     MAX_TAP_QUEUES : 1;
 
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
@@ -1606,8 +1607,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		name = ifr->ifr_name;
 
 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-				       tun_setup,
-				       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+				       tun_setup, queues, queues);
+
 		if (!dev)
 			return -ENOMEM;
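As a rough sanity check on the "fit in one page" reasoning in the new comment above, the sketch below compares the old and new worst-case tx-queue array sizes. The per-queue byte count is an assumed ballpark figure, not the exact sizeof(struct netdev_queue), and DEFAULT_MAX_NUM_RSS_QUEUES is taken as 8, its value in netdevice.h at the time.

#include <stdio.h>

#define PAGE_SIZE_BYTES            4096
#define ASSUMED_PER_QUEUE_BYTES    320   /* assumption: rough per-queue footprint */
#define OLD_MAX_TAP_QUEUES         1024
#define DEFAULT_MAX_NUM_RSS_QUEUES 8

int main(void)
{
	/* Old limit: a multi-page, high-order allocation that can easily fail
	 * once memory becomes fragmented. */
	printf("old: %d bytes for the tx queue array\n",
	       OLD_MAX_TAP_QUEUES * ASSUMED_PER_QUEUE_BYTES);

	/* New limit: comfortably inside a single page. */
	printf("new: %d bytes (page size is %d)\n",
	       DEFAULT_MAX_NUM_RSS_QUEUES * ASSUMED_PER_QUEUE_BYTES,
	       PAGE_SIZE_BYTES);
	return 0;
}

With the assumed numbers that is roughly 320 KB per device before the patch versus at most about 2.5 KB after it, and the common non-IFF_MULTI_QUEUE case now allocates only a single queue.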