author    Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com>  2019-12-09 10:31:43 -0800
committer Kees Cook <keescook@chromium.org>  2019-12-09 10:36:44 -0800
commit    c593642c8be046915ca3a4a300243a68077cd207 (patch)
tree      cd025832d9c77907d9feee365f7ba4afe2ff56ad /net/core
parent    e43723292d6d7f217b572aab07f1f165189a1b56 (diff)
treewide: Use sizeof_field() macro
Replace all occurrences of FIELD_SIZEOF() with sizeof_field(), except at
the places where these macros are defined. Later patches will remove the
unused definition of FIELD_SIZEOF().
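For reference, a minimal userspace sketch of the two macros involved. The
macro bodies mirror the kernel definitions (the old name from
include/linux/kernel.h, the new one from include/linux/stddef.h); struct
example is a made-up type used only for illustration:

#include <stdio.h>

/* Old and new spellings; both compute the size of a struct member
 * without needing an instance of the struct. */
#define FIELD_SIZEOF(t, f)		(sizeof(((t *)0)->f))
#define sizeof_field(TYPE, MEMBER)	sizeof((((TYPE *)0)->MEMBER))

struct example {
	unsigned int   mark;
	unsigned short queue_mapping;
};

int main(void)
{
	/* Both expand to the same compile-time constant; only the
	 * spelling changes across the tree. */
	printf("FIELD_SIZEOF: %zu, sizeof_field: %zu\n",
	       FIELD_SIZEOF(struct example, mark),
	       sizeof_field(struct example, mark));
	return 0;
}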
This patch was generated using the following script:
EXCLUDE_FILES="include/linux/stddef.h|include/linux/kernel.h"
git grep -l -e "\bFIELD_SIZEOF\b" | while read file;
do
	if [[ "$file" =~ $EXCLUDE_FILES ]]; then
		continue
	fi
	sed -i -e 's/\bFIELD_SIZEOF\b/sizeof_field/g' $file;
done
Signed-off-by: Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com>
Link: https://lore.kernel.org/r/20190924105839.110713-3-pankaj.laxminarayan.bharadiya@intel.com
Co-developed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: David Miller <davem@davemloft.net> # for net
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            |   2
-rw-r--r--  net/core/filter.c         | 140
-rw-r--r--  net/core/flow_dissector.c |  10
-rw-r--r--  net/core/xdp.c            |   4
4 files changed, 78 insertions, 78 deletions
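Before the per-file hunks, a small standalone sketch of the call-site
pattern they touch: a compile-time check that a struct member has the
expected width. _Static_assert stands in here for the kernel's
BUILD_BUG_ON (which fires when its condition is true, hence the inverted
test in-tree), and struct ctx is a hypothetical stand-in for sk_buff /
sock_common:

#include <stdint.h>

#define sizeof_field(TYPE, MEMBER)	sizeof((((TYPE *)0)->MEMBER))

struct ctx {
	uint32_t mark;		/* stands in for sk_buff::mark */
	uint16_t queue_mapping;	/* stands in for sk_buff::queue_mapping */
};

/* In-tree form: BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); */
_Static_assert(sizeof_field(struct ctx, mark) == 4,
	       "mark must be 4 bytes");
_Static_assert(sizeof_field(struct ctx, queue_mapping) == 2,
	       "queue_mapping must be 2 bytes");

int main(void)
{
	return 0;
}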
diff --git a/net/core/dev.c b/net/core/dev.c index 2c277b8aba38..0ad39c87b7fd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10165,7 +10165,7 @@ static struct hlist_head * __net_init netdev_create_hash(void) static int __net_init netdev_init(struct net *net) { BUILD_BUG_ON(GRO_HASH_BUCKETS > - 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask)); + 8 * sizeof_field(struct napi_struct, gro_bitmask)); if (net != &init_net) INIT_LIST_HEAD(&net->dev_base_head); diff --git a/net/core/filter.c b/net/core/filter.c index f1e703eed3d2..c19dd0973e0c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -274,7 +274,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, switch (skb_field) { case SKF_AD_MARK: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, offsetof(struct sk_buff, mark)); @@ -289,14 +289,14 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, break; case SKF_AD_QUEUE: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); + BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2); *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, queue_mapping)); break; case SKF_AD_VLAN_TAG: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); + BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, @@ -322,7 +322,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, switch (fp->k) { case SKF_AD_OFF + SKF_AD_PROTOCOL: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); + BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2); /* A = *(u16 *) (CTX + offsetof(protocol)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, @@ -338,8 +338,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); + BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); + BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), BPF_REG_TMP, BPF_REG_CTX, @@ -361,7 +361,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_RXHASH: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); + BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, hash)); @@ -385,7 +385,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_VLAN_TPID: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); + BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2); /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, @@ -5589,8 +5589,8 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, #define BPF_TCP_SOCK_GET_COMMON(FIELD) \ do { \ - BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \ - FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \ + BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \ + sizeof_field(struct bpf_tcp_sock, FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\ si->dst_reg, si->src_reg, \ offsetof(struct tcp_sock, FIELD)); \ @@ -5598,9 +5598,9 @@ u32 bpf_tcp_sock_convert_ctx_access(enum 
bpf_access_type type, #define BPF_INET_SOCK_GET_COMMON(FIELD) \ do { \ - BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock, \ + BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \ FIELD) > \ - FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \ + sizeof_field(struct bpf_tcp_sock, FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct inet_connection_sock, \ FIELD), \ @@ -5615,7 +5615,7 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, switch (si->off) { case offsetof(struct bpf_tcp_sock, rtt_min): - BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != + BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != sizeof(struct minmax)); BUILD_BUG_ON(sizeof(struct minmax) < sizeof(struct minmax_sample)); @@ -5780,8 +5780,8 @@ u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, #define BPF_XDP_SOCK_GET(FIELD) \ do { \ - BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) > \ - FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \ + BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \ + sizeof_field(struct bpf_xdp_sock, FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ si->dst_reg, si->src_reg, \ offsetof(struct xdp_sock, FIELD)); \ @@ -7344,7 +7344,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, cb[0]) ... offsetofend(struct __sk_buff, cb[4]) - 1: - BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); + BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20); BUILD_BUG_ON((offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data)) % sizeof(__u64)); @@ -7363,7 +7363,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct __sk_buff, tc_classid): - BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); + BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2); off = si->off; off -= offsetof(struct __sk_buff, tc_classid); @@ -7434,7 +7434,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, #endif break; case offsetof(struct __sk_buff, family): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, @@ -7445,7 +7445,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, 2, target_size)); break; case offsetof(struct __sk_buff, remote_ip4): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, @@ -7456,7 +7456,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, 4, target_size)); break; case offsetof(struct __sk_buff, local_ip4): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), @@ -7470,7 +7470,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, remote_ip6[0]) ... offsetof(struct __sk_buff, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; @@ -7490,7 +7490,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, local_ip6[0]) ... 
offsetof(struct __sk_buff, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; @@ -7509,7 +7509,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct __sk_buff, remote_port): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, @@ -7524,7 +7524,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct __sk_buff, local_port): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, @@ -7535,7 +7535,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct __sk_buff, tstamp): - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8); + BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_DW, @@ -7573,7 +7573,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, target_size)); break; case offsetof(struct __sk_buff, wire_len): - BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); + BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4); off = si->off; off -= offsetof(struct __sk_buff, wire_len); @@ -7603,7 +7603,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, switch (si->off) { case offsetof(struct bpf_sock, bound_dev_if): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4); + BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, @@ -7614,7 +7614,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock, mark): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4); + BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, @@ -7625,7 +7625,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock, priority): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4); + BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, @@ -7641,7 +7641,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_family, - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_family), target_size)); break; @@ -7668,7 +7668,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_rcv_saddr, - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_rcv_saddr), target_size)); break; @@ -7677,7 +7677,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_daddr, - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_daddr), target_size)); break; @@ -7691,7 +7691,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, bpf_target_off( struct sock_common, 
skc_v6_rcv_saddr.s6_addr32[0], - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]), target_size) + off); #else @@ -7708,7 +7708,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_v6_daddr.s6_addr32[0], - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_v6_daddr.s6_addr32[0]), target_size) + off); #else @@ -7722,7 +7722,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BPF_FIELD_SIZEOF(struct sock_common, skc_num), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_num, - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_num), target_size)); break; @@ -7732,7 +7732,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BPF_FIELD_SIZEOF(struct sock_common, skc_dport), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_dport, - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_dport), target_size)); break; @@ -7742,7 +7742,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BPF_FIELD_SIZEOF(struct sock_common, skc_state), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_state, - FIELD_SIZEOF(struct sock_common, + sizeof_field(struct sock_common, skc_state), target_size)); break; @@ -7837,7 +7837,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, si->src_reg, offsetof(S, F)); \ *insn++ = BPF_LDX_MEM( \ SIZE, si->dst_reg, si->dst_reg, \ - bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ + bpf_target_off(NS, NF, sizeof_field(NS, NF), \ target_size) \ + OFF); \ } while (0) @@ -7868,7 +7868,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ si->dst_reg, offsetof(S, F)); \ *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ - bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ + bpf_target_off(NS, NF, sizeof_field(NS, NF), \ target_size) \ + OFF); \ *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ @@ -7930,8 +7930,8 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, */ BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != offsetof(struct sockaddr_in6, sin6_port)); - BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) != - FIELD_SIZEOF(struct sockaddr_in6, sin6_port)); + BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) != + sizeof_field(struct sockaddr_in6, sin6_port)); SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, sin6_port, tmp_reg); @@ -7997,8 +7997,8 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, /* Helper macro for adding read access to tcp_sock or sock fields. 
*/ #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ - BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ - FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ + BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ + sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ is_fullsock), \ @@ -8031,8 +8031,8 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ int reg = BPF_REG_9; \ - BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ - FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ + BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ + sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ if (si->dst_reg == reg || si->src_reg == reg) \ reg--; \ if (si->dst_reg == reg || si->src_reg == reg) \ @@ -8073,12 +8073,12 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, switch (si->off) { case offsetof(struct bpf_sock_ops, op) ... offsetof(struct bpf_sock_ops, replylong[3]): - BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) != - FIELD_SIZEOF(struct bpf_sock_ops_kern, op)); - BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) != - FIELD_SIZEOF(struct bpf_sock_ops_kern, reply)); - BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) != - FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong)); + BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) != + sizeof_field(struct bpf_sock_ops_kern, op)); + BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != + sizeof_field(struct bpf_sock_ops_kern, reply)); + BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != + sizeof_field(struct bpf_sock_ops_kern, replylong)); off = si->off; off -= offsetof(struct bpf_sock_ops, op); off += offsetof(struct bpf_sock_ops_kern, op); @@ -8091,7 +8091,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, family): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), @@ -8102,7 +8102,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, remote_ip4): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), @@ -8113,7 +8113,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, local_ip4): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( @@ -8128,7 +8128,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... offsetof(struct bpf_sock_ops, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; @@ -8149,7 +8149,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, case offsetof(struct bpf_sock_ops, local_ip6[0]) ... 
offsetof(struct bpf_sock_ops, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; @@ -8168,7 +8168,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, remote_port): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), @@ -8182,7 +8182,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, local_port): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), @@ -8202,7 +8202,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, state): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), @@ -8213,7 +8213,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct bpf_sock_ops, rtt_min): - BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != + BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != sizeof(struct minmax)); BUILD_BUG_ON(sizeof(struct minmax) < sizeof(struct minmax_sample)); @@ -8224,7 +8224,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct tcp_sock, rtt_min) + - FIELD_SIZEOF(struct minmax_sample, t)); + sizeof_field(struct minmax_sample, t)); break; case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): @@ -8366,7 +8366,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, offsetof(struct sk_msg, data_end)); break; case offsetof(struct sk_msg_md, family): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), @@ -8377,7 +8377,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct sk_msg_md, remote_ip4): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), @@ -8388,7 +8388,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct sk_msg_md, local_ip4): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( @@ -8403,7 +8403,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, case offsetof(struct sk_msg_md, remote_ip6[0]) ... offsetof(struct sk_msg_md, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; @@ -8424,7 +8424,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, case offsetof(struct sk_msg_md, local_ip6[0]) ... 
offsetof(struct sk_msg_md, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; @@ -8443,7 +8443,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct sk_msg_md, remote_port): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), @@ -8457,7 +8457,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct sk_msg_md, local_port): - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), @@ -8847,7 +8847,7 @@ sk_reuseport_is_valid_access(int off, int size, /* Fields that allow narrowing */ case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): - if (size < FIELD_SIZEOF(struct sk_buff, protocol)) + if (size < sizeof_field(struct sk_buff, protocol)) return false; /* fall through */ case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): @@ -8865,7 +8865,7 @@ sk_reuseport_is_valid_access(int off, int size, *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ si->dst_reg, si->src_reg, \ bpf_target_off(struct sk_reuseport_kern, F, \ - FIELD_SIZEOF(struct sk_reuseport_kern, F), \ + sizeof_field(struct sk_reuseport_kern, F), \ target_size)); \ }) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d524a693e00f..2dbbb030fbed 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -599,8 +599,8 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, offset += sizeof(struct gre_base_hdr); if (hdr->flags & GRE_CSUM) - offset += FIELD_SIZEOF(struct gre_full_hdr, csum) + - FIELD_SIZEOF(struct gre_full_hdr, reserved1); + offset += sizeof_field(struct gre_full_hdr, csum) + + sizeof_field(struct gre_full_hdr, reserved1); if (hdr->flags & GRE_KEY) { const __be32 *keyid; @@ -622,11 +622,11 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, else key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK; } - offset += FIELD_SIZEOF(struct gre_full_hdr, key); + offset += sizeof_field(struct gre_full_hdr, key); } if (hdr->flags & GRE_SEQ) - offset += FIELD_SIZEOF(struct pptp_gre_header, seq); + offset += sizeof_field(struct pptp_gre_header, seq); if (gre_ver == 0) { if (*p_proto == htons(ETH_P_TEB)) { @@ -653,7 +653,7 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, u8 *ppp_hdr; if (hdr->flags & GRE_ACK) - offset += FIELD_SIZEOF(struct pptp_gre_header, ack); + offset += sizeof_field(struct pptp_gre_header, ack); ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset, sizeof(_ppp_hdr), diff --git a/net/core/xdp.c b/net/core/xdp.c index 7c8390ad4dc6..8310714c47fd 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -36,7 +36,7 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) const u32 *k = data; const u32 key = *k; - BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id) + BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id) != sizeof(u32)); /* Use cyclic increasing ID as direct hash key */ @@ -56,7 +56,7 @@ static const struct rhashtable_params mem_id_rht_params = { .nelem_hint = 64, .head_offset = offsetof(struct xdp_mem_allocator, node), .key_offset = offsetof(struct xdp_mem_allocator, mem.id), - .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id), 
+ .key_len = sizeof_field(struct xdp_mem_allocator, mem.id), .max_size = MEM_ID_MAX, .min_size = 8, .automatic_shrinking = true,
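The xdp.c hunk above also uses sizeof_field() for a table parameter rather
than an assertion. The sketch below illustrates that design choice with
hypothetical types (struct allocator, struct table_params), not the
kernel's rhashtable API: deriving the key length from the member's type
keeps the parameter in sync if the field ever changes width.

#include <stddef.h>
#include <stdio.h>

#define sizeof_field(TYPE, MEMBER)	sizeof((((TYPE *)0)->MEMBER))

struct allocator {
	struct {
		unsigned int id;
	} mem;
};

struct table_params {
	size_t key_offset;
	size_t key_len;
};

static const struct table_params params = {
	.key_offset = offsetof(struct allocator, mem.id),
	.key_len    = sizeof_field(struct allocator, mem.id), /* not a hard-coded 4 */
};

int main(void)
{
	printf("key at offset %zu, length %zu\n",
	       params.key_offset, params.key_len);
	return 0;
}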