linux/net/hsr/hsr_netlink.c
Eric Dumazet b3c9e65eb2 net: hsr: remove seqnr_lock
syzbot found a new splat [1].

Instead of adding yet another spin_lock_bh(&hsr->seqnr_lock) /
spin_unlock_bh(&hsr->seqnr_lock) pair, remove seqnr_lock
and use atomic_t for hsr->sequence_nr and hsr->sup_sequence_nr.

This also avoids a race in hsr_fill_info().

Also remove interlink_sequence_nr which is unused.
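
For reference, a minimal sketch of the pattern described above (illustrative
only, not the actual diff; the local variable name skb_seq is made up):

	/* Before: sequence counter protected by hsr->seqnr_lock */
	spin_lock_bh(&hsr->seqnr_lock);
	skb_seq = hsr->sequence_nr++;
	spin_unlock_bh(&hsr->seqnr_lock);

	/* After: lockless atomic counter; the value put on the wire stays 16 bit */
	skb_seq = (u16)atomic_inc_return(&hsr->sequence_nr);

A reader such as hsr_fill_info() then uses atomic_read(&hsr->sequence_nr)
instead of an unlocked plain read.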

[1]
 WARNING: CPU: 1 PID: 9723 at net/hsr/hsr_forward.c:602 handle_std_frame+0x247/0x2c0 net/hsr/hsr_forward.c:602
Modules linked in:
CPU: 1 UID: 0 PID: 9723 Comm: syz.0.1657 Not tainted 6.11.0-rc6-syzkaller-00026-g88fac17500f4 #0
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
 RIP: 0010:handle_std_frame+0x247/0x2c0 net/hsr/hsr_forward.c:602
Code: 49 8d bd b0 01 00 00 be ff ff ff ff e8 e2 58 25 00 31 ff 89 c5 89 c6 e8 47 53 a8 f6 85 ed 0f 85 5a ff ff ff e8 fa 50 a8 f6 90 <0f> 0b 90 e9 4c ff ff ff e8 cc e7 06 f7 e9 8f fe ff ff e8 52 e8 06
RSP: 0018:ffffc90000598598 EFLAGS: 00010246
RAX: 0000000000000000 RBX: ffffc90000598670 RCX: ffffffff8ae2c919
RDX: ffff888024e94880 RSI: ffffffff8ae2c926 RDI: 0000000000000005
RBP: 0000000000000000 R08: 0000000000000005 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000003
R13: ffff8880627a8cc0 R14: 0000000000000000 R15: ffff888012b03c3a
FS:  0000000000000000(0000) GS:ffff88802b700000(0063) knlGS:00000000f5696b40
CS:  0010 DS: 002b ES: 002b CR0: 0000000080050033
CR2: 0000000020010000 CR3: 00000000768b4000 CR4: 0000000000350ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 <IRQ>
  hsr_fill_frame_info+0x2c8/0x360 net/hsr/hsr_forward.c:630
  fill_frame_info net/hsr/hsr_forward.c:700 [inline]
  hsr_forward_skb+0x7df/0x25c0 net/hsr/hsr_forward.c:715
  hsr_handle_frame+0x603/0x850 net/hsr/hsr_slave.c:70
  __netif_receive_skb_core.constprop.0+0xa3d/0x4330 net/core/dev.c:5555
  __netif_receive_skb_list_core+0x357/0x950 net/core/dev.c:5737
  __netif_receive_skb_list net/core/dev.c:5804 [inline]
  netif_receive_skb_list_internal+0x753/0xda0 net/core/dev.c:5896
  gro_normal_list include/net/gro.h:515 [inline]
  gro_normal_list include/net/gro.h:511 [inline]
  napi_complete_done+0x23f/0x9a0 net/core/dev.c:6247
  gro_cell_poll+0x162/0x210 net/core/gro_cells.c:66
  __napi_poll.constprop.0+0xb7/0x550 net/core/dev.c:6772
  napi_poll net/core/dev.c:6841 [inline]
  net_rx_action+0xa92/0x1010 net/core/dev.c:6963
  handle_softirqs+0x216/0x8f0 kernel/softirq.c:554
  do_softirq kernel/softirq.c:455 [inline]
  do_softirq+0xb2/0xf0 kernel/softirq.c:442
 </IRQ>
 <TASK>

Fixes: 06afd2c31d ("hsr: Synchronize sending frames to have always incremented outgoing seq nr.")
Fixes: f421436a59 ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2024-09-09 10:25:01 +01:00


// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */
#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
	[IFLA_HSR_VERSION]		= { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL]		= { .type = NLA_U8 },
	[IFLA_HSR_INTERLINK]		= { .type = NLA_U32 },
};

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;
	struct net_device *link[2], *interlink = NULL;

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	if (data[IFLA_HSR_INTERLINK])
		interlink = __dev_get_by_index(src_net,
					       nla_get_u32(data[IFLA_HSR_INTERLINK]));

	if (interlink && interlink == link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
		return -EINVAL;
	}

	if (interlink && interlink == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP) {
		proto_version = PRP_V1;
		if (interlink) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interlink only works with HSR");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, interlink, multicast_spec,
				proto_version, extack);
}

static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	del_timer_sync(&hsr->prune_timer);
	del_timer_sync(&hsr->prune_proxy_timer);
	del_timer_sync(&hsr->announce_timer);
	timer_delete_sync(&hsr->announce_proxy_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);
	hsr_del_nodes(&hsr->proxy_node_db);

	unregister_netdevice_queue(dev, head);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	u8 proto = HSR_PROTOCOL_HSR;
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}
	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, atomic_read(&hsr->sequence_nr)))
		goto nla_put_failure;
	if (hsr->prot_version == PRP_V1)
		proto = HSR_PROTOCOL_PRP;

	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.dellink	= hsr_dellink,
	.fill_info	= hsr_fill_info,
};

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR]	= { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B]	= { .len = ETH_ALEN },
	[HSR_A_IFINDEX]		= { .type = NLA_U32 },
	[HSR_A_IF1_AGE]		= { .type = NLA_U32 },
	[HSR_A_IF2_AGE]		= { .type = NLA_U32 },
	[HSR_A_IF1_SEQ]		= { .type = NLA_U16 },
	[HSR_A_IF2_SEQ]		= { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

/* Get a list of MacAddressA of all nodes known to this node (including self).
 * The reply is sent in chunks: if an attribute does not fit (-EMSGSIZE), the
 * partial message is sent and a new one is started from the current position.
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");