From 2c8c56e15df3d4c2af3d656e44feb18789f75837 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 11 Nov 2014 05:54:28 -0800
Subject: net: introduce SO_INCOMING_CPU

An alternative to RPS/RFS is to use hardware support for multiple
queues, then split a set of millions of sockets across worker threads,
each one using epoll() to manage events on its own socket pool.

Ideally, we want one thread per RX/TX queue/cpu, but we have no way to
know after accept() or connect() on which queue/cpu a socket is managed.

We normally use one cpu per RX queue (IRQ smp_affinity being properly
set), so remembering on the socket structure which cpu delivered the
last packet is enough to solve the problem.

After accept(), connect(), or even file descriptor passing between
processes, applications can use:

 int cpu;
 socklen_t len = sizeof(cpu);

 getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len);

and use this information to put the socket into the right silo for
optimal performance, as the whole networking stack then runs on the
appropriate cpu, without the need to send IPIs (RPS/RFS).

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 arch/ia64/include/uapi/asm/socket.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index a1b49bac7951..09a93fb566f6 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -89,4 +89,6 @@
 
 #define SO_BPF_EXTENSIONS 48
 
+#define SO_INCOMING_CPU 49
+
 #endif /* _ASM_IA64_SOCKET_H */
-- cgit v1.2.3

From 89aa075832b0da4402acebd698d0411dcc82d03e Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Mon, 1 Dec 2014 15:06:35 -0800
Subject: net: sock: allow eBPF programs to be attached to sockets

Introduce a new setsockopt() command:

 setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd))

where prog_fd was received from the bpf(BPF_PROG_LOAD, attr, ...) syscall
with attr->prog_type == BPF_PROG_TYPE_SOCKET_FILTER.

setsockopt() calls bpf_prog_get(), which increments the refcnt of the
program, so it doesn't get unloaded while a socket is using it. The same
eBPF program can be attached to multiple sockets.

When the user task exits, its sockets are automatically closed, which
calls sk_filter_uncharge() and decrements the refcnt of the eBPF program.

Signed-off-by: Alexei Starovoitov
Signed-off-by: David S. Miller
---
 arch/ia64/include/uapi/asm/socket.h | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 09a93fb566f6..59be3d87f86d 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -91,4 +91,7 @@
 
 #define SO_INCOMING_CPU 49
 
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
 #endif /* _ASM_IA64_SOCKET_H */
-- cgit v1.2.3

From 0cb6c969ed9de43687abdfc63714b6fe4385d2fc Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 10 Dec 2014 16:33:12 +0100
Subject: net, lib: kill arch_fast_hash library bits

As there are now no remaining users of arch_fast_hash(), let's kill it
entirely.

This basically reverts commit 71ae8aac3e19 ("lib: introduce arch
optimized hash library") and follow-up work, namely commit 237217546d44
("lib: hash: follow-up fixups for arch hash"), commit e3fec2f74f7f
("lib: Add missing arch generic-y entries for asm-generic/hash.h") and,
last but not least, commit 6a02652df511 ("perf tools: Fix include for
non x86 architectures").

Cc: Francesco Fusco
Cc: Thomas Graf
Cc: Arnaldo Carvalho de Melo
Signed-off-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 arch/ia64/include/asm/Kbuild | 1 -
 1 file changed, 1 deletion(-)
(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 747320be9d0e..9b41b4bcc073 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,7 +1,6 @@
 generic-y += clkdev.h
 generic-y += exec.h
-generic-y += hash.h
 generic-y += irq_work.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
-- cgit v1.2.3
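
To make the first two patches above concrete, here is a minimal user-space
sketch (an editorial illustration under stated assumptions, not code from
the patches or from the kernel samples): it loads a trivial
BPF_PROG_TYPE_SOCKET_FILTER program through the bpf() syscall, attaches it
with SO_ATTACH_BPF, and then queries SO_INCOMING_CPU so the socket can be
handed to a worker pinned to that cpu. The fallback #defines assume the
generic option values 49 and 50 (matching the ia64 numbers added above),
and __NR_bpf must be provided by the installed kernel headers.

/*
 * Sketch: attach a trivial eBPF socket filter, then read the cpu that
 * delivered the last packet.  Assumes <linux/bpf.h> from >= 3.19 kernel
 * headers and a libc exposing __NR_bpf; error handling is kept minimal
 * on purpose.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

#ifndef SO_INCOMING_CPU
#define SO_INCOMING_CPU 49	/* value added by the first patch */
#endif
#ifndef SO_ATTACH_BPF
#define SO_ATTACH_BPF 50	/* value added by the second patch */
#endif

/* "r0 = -1; exit" -- a socket filter that accepts every packet in full. */
static const struct bpf_insn prog[] = {
	{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = -1 },
	{ .code = BPF_JMP | BPF_EXIT },
};

static int load_socket_filter(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (unsigned long)prog;
	attr.insn_cnt  = sizeof(prog) / sizeof(prog[0]);
	attr.license   = (unsigned long)"GPL";

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

int main(void)
{
	int sock = socket(AF_INET, SOCK_DGRAM, 0);
	int prog_fd = load_socket_filter();
	int cpu;
	socklen_t len = sizeof(cpu);

	if (sock < 0 || prog_fd < 0 ||
	    setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
		       &prog_fd, sizeof(prog_fd)) < 0) {
		perror("attach bpf filter");
		return 1;
	}

	/* Once traffic has arrived, ask which cpu is handling this socket. */
	if (getsockopt(sock, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
		printf("socket served by cpu %d\n", cpu);

	close(prog_fd);
	close(sock);
	return 0;
}

A real server would do this per accepted connection and hand the fd to the
epoll loop of the worker thread pinned to the reported cpu, so the whole
stack stays on one cpu without RPS/RFS IPIs.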