Diffstat (limited to 'drivers/net/vxlan/vxlan_mdb.c')
-rw-r--r-- | drivers/net/vxlan/vxlan_mdb.c | 190
1 file changed, 174 insertions(+), 16 deletions(-)
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index 5e041622261a..eb4c580b5cee 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -311,7 +311,7 @@ vxlan_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
 	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_entry_pol),
 };
 
-static struct netlink_range_validation vni_range = {
+static const struct netlink_range_validation vni_range = {
 	.max = VXLAN_N_VID - 1,
 };
 
@@ -370,12 +370,10 @@ static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto,
 	return true;
 }
 
-static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg,
-				       const struct br_mdb_entry *entry,
-				       const struct nlattr *source_attr)
+static void vxlan_mdb_group_set(struct vxlan_mdb_entry_key *group,
+				const struct br_mdb_entry *entry,
+				const struct nlattr *source_attr)
 {
-	struct vxlan_mdb_entry_key *group = &cfg->group;
-
 	switch (entry->addr.proto) {
 	case htons(ETH_P_IP):
 		group->dst.sa.sa_family = AF_INET;
@@ -503,7 +501,7 @@ static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg,
 					entry->addr.proto, extack))
 		return -EINVAL;
 
-	vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
+	vxlan_mdb_group_set(&cfg->group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
 
 	/* rtnetlink code only validates that IPv4 group address is
 	 * multicast.
@@ -927,23 +925,20 @@ vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group,
 	return nlmsg_size;
 }
 
-static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
-				   const struct vxlan_mdb_entry *mdb_entry,
-				   const struct vxlan_mdb_remote *remote)
+static size_t
+vxlan_mdb_nlmsg_remote_size(const struct vxlan_dev *vxlan,
+			    const struct vxlan_mdb_entry *mdb_entry,
+			    const struct vxlan_mdb_remote *remote)
 {
 	const struct vxlan_mdb_entry_key *group = &mdb_entry->key;
 	struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
 	size_t nlmsg_size;
 
-	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
-		     /* MDBA_MDB */
-		     nla_total_size(0) +
-		     /* MDBA_MDB_ENTRY */
-		     nla_total_size(0) +
 		     /* MDBA_MDB_ENTRY_INFO */
-		     nla_total_size(sizeof(struct br_mdb_entry)) +
+	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
 		     /* MDBA_MDB_EATTR_TIMER */
 		     nla_total_size(sizeof(u32));
+
 	/* MDBA_MDB_EATTR_SOURCE */
 	if (vxlan_mdb_is_sg(group))
 		nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst));
@@ -971,6 +966,19 @@ static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
 	return nlmsg_size;
 }
 
+static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
+				   const struct vxlan_mdb_entry *mdb_entry,
+				   const struct vxlan_mdb_remote *remote)
+{
+	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+	       /* MDBA_MDB */
+	       nla_total_size(0) +
+	       /* MDBA_MDB_ENTRY */
+	       nla_total_size(0) +
+	       /* Remote entry */
+	       vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, remote);
+}
+
 static int vxlan_mdb_nlmsg_fill(const struct vxlan_dev *vxlan,
 				struct sk_buff *skb,
 				const struct vxlan_mdb_entry *mdb_entry,
@@ -1298,6 +1306,156 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
 	return err;
 }
 
+static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
+	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
+					      sizeof(struct in_addr),
+					      sizeof(struct in6_addr)),
+	[MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+};
+
+static int vxlan_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
+			       struct vxlan_mdb_entry_key *group,
+			       struct netlink_ext_ack *extack)
+{
+	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
+	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	int err;
+
+	memset(group, 0, sizeof(*group));
+	group->vni = vxlan->default_dst.remote_vni;
+
+	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
+		vxlan_mdb_group_set(group, entry, NULL);
+		return 0;
+	}
+
+	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+			       tb[MDBA_GET_ENTRY_ATTRS],
+			       vxlan_mdbe_attrs_get_pol, extack);
+	if (err)
+		return err;
+
+	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
+	    !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE],
+				       entry->addr.proto, extack))
+		return -EINVAL;
+
+	vxlan_mdb_group_set(group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
+
+	if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
+		group->vni =
+			cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
+
+	return 0;
+}
+
+static struct sk_buff *
+vxlan_mdb_get_reply_alloc(const struct vxlan_dev *vxlan,
+			  const struct vxlan_mdb_entry *mdb_entry)
+{
+	struct vxlan_mdb_remote *remote;
+	size_t nlmsg_size;
+
+	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+		     /* MDBA_MDB */
+		     nla_total_size(0) +
+		     /* MDBA_MDB_ENTRY */
+		     nla_total_size(0);
+
+	list_for_each_entry(remote, &mdb_entry->remotes, list)
+		nlmsg_size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry,
+							  remote);
+
+	return nlmsg_new(nlmsg_size, GFP_KERNEL);
+}
+
+static int
+vxlan_mdb_get_reply_fill(const struct vxlan_dev *vxlan,
+			 struct sk_buff *skb,
+			 const struct vxlan_mdb_entry *mdb_entry,
+			 u32 portid, u32 seq)
+{
+	struct nlattr *mdb_nest, *mdb_entry_nest;
+	struct vxlan_mdb_remote *remote;
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = vxlan->dev->ifindex;
+	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
+	if (!mdb_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
+	if (!mdb_entry_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+
+	list_for_each_entry(remote, &mdb_entry->remotes, list) {
+		err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote);
+		if (err)
+			goto cancel;
+	}
+
+	nla_nest_end(skb, mdb_entry_nest);
+	nla_nest_end(skb, mdb_nest);
+	nlmsg_end(skb, nlh);
+
+	return 0;
+
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return err;
+}
+
+int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
+		  u32 seq, struct netlink_ext_ack *extack)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_mdb_entry *mdb_entry;
+	struct vxlan_mdb_entry_key group;
+	struct sk_buff *skb;
+	int err;
+
+	ASSERT_RTNL();
+
+	err = vxlan_mdb_get_parse(dev, tb, &group, extack);
+	if (err)
+		return err;
+
+	mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group);
+	if (!mdb_entry) {
+		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
+		return -ENOENT;
+	}
+
+	skb = vxlan_mdb_get_reply_alloc(vxlan, mdb_entry);
+	if (!skb)
+		return -ENOMEM;
+
+	err = vxlan_mdb_get_reply_fill(vxlan, skb, mdb_entry, portid, seq);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
+		goto free;
+	}
+
+	return rtnl_unicast(skb, dev_net(dev), portid);
+
+free:
+	kfree_skb(skb);
+	return err;
+}
+
 struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
 						struct sk_buff *skb,
 						__be32 src_vni)
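
For illustration only (not part of the patch): a minimal userspace sketch of how the new RTM_GETMDB doit path above might be exercised. It assumes UAPI headers that already carry MDBA_GET_ENTRY, MDBA_GET_ENTRY_ATTRS and MDBE_ATTR_SRC_VNI from this series, and uses libmnl for message construction; the device name "vxlan0", group 239.1.1.1 and source VNI 10010 are made-up example values.

#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct br_mdb_entry entry;
	struct br_port_msg *bpm;
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	/* RTM_GETMDB request aimed at the VXLAN netdevice. */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_GETMDB;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	bpm = mnl_nlmsg_put_extra_header(nlh, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = if_nametoindex("vxlan0");	/* example device */

	/* MDBA_GET_ENTRY: the group to look up, consumed by
	 * vxlan_mdb_get_parse() above.
	 */
	memset(&entry, 0, sizeof(entry));
	entry.addr.proto = htons(ETH_P_IP);
	entry.addr.u.ip4 = inet_addr("239.1.1.1");	/* example group */
	mnl_attr_put(nlh, MDBA_GET_ENTRY, sizeof(entry), &entry);

	/* MDBA_GET_ENTRY_ATTRS / MDBE_ATTR_SRC_VNI: query a non-default
	 * source VNI instead of vxlan->default_dst.remote_vni.
	 */
	nest = mnl_attr_nest_start(nlh, MDBA_GET_ENTRY_ATTRS);
	mnl_attr_put_u32(nlh, MDBE_ATTR_SRC_VNI, 10010);	/* example VNI */
	mnl_attr_nest_end(nlh, nest);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;

	/* On success the kernel unicasts back one RTM_NEWMDB message built
	 * by vxlan_mdb_get_reply_fill(); a miss comes back as a netlink
	 * error with the "MDB entry not found" extack string.
	 */
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	mnl_socket_recvfrom(nl, buf, sizeof(buf));

	mnl_socket_close(nl);
	return 0;
}

Build with -lmnl; parsing of the RTM_NEWMDB reply is left out to keep the sketch short.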