/* Netlink interface for the HSR driver (IEC 62439-3 High-availability
 * Seamless Redundancy): rtnetlink ops for creating and describing hsr
 * devices, plus a generic netlink family for querying the node table and
 * for sending ring-error / node-down notifications to userspace.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
};
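
/* rtnetlink hook behind "ip link add ... type hsr". With a reasonably recent
 * iproute2 a device is typically created with something like
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 version 1
 *
 * (interface names are only illustrative). The two slave ifindexes are
 * mandatory; multicast_spec and the protocol version default to 0 (HSRv0)
 * when the corresponding attributes are omitted.
 */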
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_device *link[2];
	unsigned char multicast_spec, hsr_version;

	if (!data) {
		netdev_info(dev, "HSR: No slave devices specified\n");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		netdev_info(dev, "HSR: Slave1 device not specified\n");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!data[IFLA_HSR_SLAVE2]) {
		netdev_info(dev, "HSR: Slave2 device not specified\n");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));

	if (!link[0] || !link[1])
		return -ENODEV;
	if (link[0] == link[1])
		return -EINVAL;

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (!data[IFLA_HSR_VERSION])
		hsr_version = 0;
	else
		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);

	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}
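
/* Describe an existing hsr device back to userspace; this fills in the
 * hsr-specific attributes shown by e.g. "ip -d link show hsr0".
 */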
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	int res;

	hsr = netdev_priv(dev);

	res = 0;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind = "hsr",
	.maxtype = IFLA_HSR_MAX,
	.policy = hsr_policy,
	.priv_size = sizeof(struct hsr_priv),
	.setup = hsr_dev_setup,
	.newlink = hsr_newlink,
	.fill_info = hsr_fill_info,
};
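
/* Besides rtnetlink, the module exposes a generic netlink family (registered
 * below under the name "HSR"). It carries the HSR_C_* commands for querying
 * the node table, and the multicast notifications sent on the "hsr-network"
 * group when the ring degrades or a node disappears.
 */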
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
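
/* Multicast notification sent when frames from the node with MAC address
 * addr are only seen on one of the two slave interfaces, i.e. the ring
 * appears to be broken somewhere.
 */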
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}
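
/* Multicast notification sent when the node with MAC address addr has not
 * been heard from for some time and is therefore considered down.
 */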
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}
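
/* HSR_C_GET_NODE_STATUS: let userspace query the node table for the status
 * of one node, identified by its MAC address.
 *
 * Request: HSR_A_IFINDEX (the hsr master), HSR_A_NODE_ADDR
 * Reply:   the same two attributes echoed back, plus (when known) the node's
 *	    second MAC address and the ifindex it was last seen on, and the
 *	    age and last sequence number seen per slave interface.
 */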
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Build and send the reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
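
/* HSR_C_GET_NODE_LIST: return the MAC addresses of all nodes currently known
 * to this hsr device, one HSR_A_NODE_ADDR attribute each, in a single
 * HSR_C_SET_NODE_LIST reply.
 */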
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	void *pos;
	unsigned char addr[ETH_ALEN];
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Build and send the reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);

	rcu_read_lock();
	pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}

static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.module = THIS_MODULE,
	.ops = hsr_ops,
	.n_ops = ARRAY_SIZE(hsr_ops),
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
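
/* Userspace can resolve this family by name, e.g. with libnl's
 * genl_ctrl_resolve(sock, "HSR"), and subscribe to the "hsr-network"
 * multicast group to receive the ring-error and node-down notifications
 * above. This is only a usage hint; the exact calls depend on the netlink
 * library in use.
 */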

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");