// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool_netlink.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

#include "bitset.h"
#include "common.h"
#include "netlink.h"

const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
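
/* The uAPI tunnel type bits are the log2 of the corresponding
 * UDP_TUNNEL_TYPE_* flags, so the driver-provided flag word can be
 * exported directly as an ethtool bitset.
 */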
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
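
/* Attribute space needed for one _UDP_TABLE nest, not counting its entries:
 * the nest header, the _UDP_TABLE_SIZE attribute and the tunnel type bitset.
 */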
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
	ssize_t size;

	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				   udp_tunnel_type_names, compact);
	if (size < 0)
		return size;

	return size +
		nla_total_size(0) + /* _UDP_TABLE */
		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
}
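
/* Compute the reply size for one device: one _UDP_TABLE nest per port table
 * the device exposes, plus an extra single-entry table when the device only
 * matches the IANA VXLAN port (UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN).
 */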
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
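
/* Fill the reply. The attribute layout written below is:
 *
 *   ETHTOOL_A_TUNNEL_INFO_UDP_PORTS (nest)
 *     ETHTOOL_A_TUNNEL_UDP_TABLE (nest, one per port table)
 *       ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE (u32)
 *       ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES (bitset)
 *       ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY (nest, per-port entries written by
 *                                         udp_tunnel_nic_dump_write())
 *         ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT (be16)
 *         ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE (u32)
 *
 * Devices with UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN get an additional table
 * holding a single, fixed IANA VXLAN port entry.
 */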
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		/* the entry nest was never started, only cancel the table */
		if (!entry)
			goto err_cancel_table;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}

	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
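
/* Handle a single ETHTOOL_MSG_TUNNEL_INFO_GET request: resolve the target
 * device from the request header, then size and fill the reply under RTNL.
 */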
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}
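
/* Dump iteration state, stored in netlink_callback::ctx between dumpit calls. */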
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	unsigned long		ifindex;
};
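
/* Validate the request header at the start of a dump. Any device reference
 * taken while parsing is dropped again; the dump walks all devices itself.
 */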
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->info.attrs;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_parse_header_dev_get(&ctx->req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	if (ctx->req_info.dev) {
		ethnl_parse_header_dev_put(&ctx->req_info);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
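
/* Walk all netdevs in the namespace and emit one TUNNEL_INFO_GET_REPLY per
 * device that reports UDP tunnel offload info; devices without it are skipped.
 */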
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		ehdr = ethnl_dump_put(skb, cb,
				      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev,
					      ETHTOOL_A_TUNNEL_INFO_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		ctx->req_info.dev = dev;
		ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
		ctx->req_info.dev = NULL;
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			if (ret == -EOPNOTSUPP)
				continue;
			break;
		}
		genlmsg_end(skb, ehdr);
	}
	rtnl_unlock();

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}