Diffstat (limited to 'include/linux/bpf-cgroup.h')
-rw-r--r--  include/linux/bpf-cgroup.h  18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index c970a25d2a49..360c082e885c 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -7,6 +7,7 @@
struct sock;
struct cgroup;
struct sk_buff;
+struct bpf_sock_ops_kern;
#ifdef CONFIG_CGROUP_BPF
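The forward declaration added above lets this header take a pointer to the new kernel-side
context without pulling in its full definition. For orientation, a minimal sketch of what
struct bpf_sock_ops_kern might carry, assuming it is defined elsewhere in this patch series
(the field names here are illustrative and are not part of this diff):

	/* Illustrative sketch only -- the real definition lives outside this
	 * header (e.g. alongside the other BPF context structs in the same
	 * series). Field names are assumptions for the purpose of the example.
	 */
	struct bpf_sock_ops_kern {
		struct sock	*sk;		/* socket the operation applies to */
		u32		op;		/* which socket event triggered the call */
		union {
			u32	reply;		/* value handed back by the BPF program */
			u32	replylong[4];
		};
	};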
@@ -42,6 +43,10 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
int __cgroup_bpf_run_filter_sk(struct sock *sk,
enum bpf_attach_type type);
+int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
+ struct bpf_sock_ops_kern *sock_ops,
+ enum bpf_attach_type type);
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
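The declaration added in the hunk above mirrors __cgroup_bpf_run_filter_sk(). As a hedged
sketch of how the corresponding implementation plausibly works -- look up the effective
program attached to the socket's cgroup for the given attach type and run it over the
context -- modelled on the existing per-socket filter helpers rather than taken from this
diff:

	/* Sketch only: patterned after __cgroup_bpf_run_filter_sk(); not the
	 * literal body added by this patch.
	 */
	int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
					     struct bpf_sock_ops_kern *sock_ops,
					     enum bpf_attach_type type)
	{
		struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
		struct bpf_prog *prog;
		int ret = 0;

		rcu_read_lock();
		prog = rcu_dereference(cgrp->bpf.effective[type]);
		if (prog)
			ret = BPF_PROG_RUN(prog, sock_ops);
		rcu_read_unlock();

		/* Assumed convention: a program return of 1 means "ok". */
		return ret == 1 ? 0 : -EPERM;
	}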
@@ -75,6 +80,18 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
__ret; \
})
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
+ if (sk_fullsock(__sk)) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
+ sock_ops, \
+ BPF_CGROUP_SOCK_OPS); \
+ } \
+ __ret; \
+})
#else
struct cgroup_bpf {};
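The new wrapper only runs the program when cgroup BPF is enabled, the context carries a
socket, and that socket resolves to a full socket (request sockets are mapped to their
listener via sk_to_full_sk()). Note that the macro uses typeof(sk), so it expects a
variable named sk to be in scope at the expansion site. A hedged sketch of a caller in the
style the TCP side of this series is expected to use -- the helper name and the reply
field are assumptions based on the bpf_sock_ops_kern layout sketched earlier, not part of
this hunk:

	/* Sketch of a call site. A local variable named 'sk' must be in scope
	 * to satisfy the macro's typeof(sk).
	 */
	static inline int tcp_call_bpf(struct sock *sk, int op)
	{
		struct bpf_sock_ops_kern sock_ops;
		int ret;

		memset(&sock_ops, 0, sizeof(sock_ops));
		sock_ops.sk = sk;
		sock_ops.op = op;

		ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
		if (ret == 0)
			ret = sock_ops.reply;	/* program's answer, 0 if unset */
		else
			ret = -1;		/* no full socket, or program denied */
		return ret;
	}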
@@ -85,6 +102,7 @@ static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#endif /* CONFIG_CGROUP_BPF */
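With CONFIG_CGROUP_BPF disabled, the new macro compiles to ({ 0; }) like its INET
counterparts, so callers such as the sketch above need no #ifdef guards and simply behave
as if no program were attached. For completeness, a minimal sketch of the kind of BPF
program this hook is meant to run, attached to a cgroup with attach type
BPF_CGROUP_SOCK_OPS; it assumes the uapi struct bpf_sock_ops introduced alongside this
hook and a clang -target bpf build:

	#include <linux/bpf.h>

	#define SEC(name) __attribute__((section(name), used))

	SEC("sockops")
	int noop_sockops(struct bpf_sock_ops *skops)
	{
		/* In the kernel-side sketch above, a return value of 1 is
		 * treated as success; anything else is reported as an error
		 * to the in-kernel caller.
		 */
		return 1;
	}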