#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_forward.h"
#include "hsr_framereg.h"

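/* rx_handler for frames received on an HSR slave device. Frames sent by this
 * node itself are dropped, HSR- and PRP-tagged frames are handed to the
 * forwarding code, and everything else is passed up the stack unchanged.
 */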
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	__be16 protocol;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	rcu_read_lock();
	port = hsr_port_get_rcu(skb->dev);

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* Only HSR- and PRP-tagged frames are handled here; let the stack
	 * see everything else.
	 */
	protocol = eth_hdr(skb)->h_proto;
	if (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR))
		goto finish_pass;

	/* hsr_forward_skb() expects the skb to start at the Ethernet header */
	skb_push(skb, ETH_HLEN);

	hsr_forward_skb(skb, port);

finish_consume:
	rcu_read_unlock();
	return RX_HANDLER_CONSUMED;

finish_pass:
	rcu_read_unlock();
	return RX_HANDLER_PASS;
}

/* A net_device is an HSR slave iff the HSR rx_handler is attached to it */
bool hsr_port_exists(const struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}

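/* Check that dev can be used as an HSR slave: it must be a real, non-VLAN
 * Ethernet device that is not already part of an HSR setup.
 */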
static int hsr_check_dev_ok(struct net_device *dev)
{
	/* Don't allow HSR on loopback or non-Ethernet devices */
	if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
	    (dev->addr_len != ETH_ALEN)) {
		netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
		return -EINVAL;
	}

	/* Don't allow HSR devices to be stacked on top of each other */
	if (is_hsr_master(dev)) {
		netdev_info(dev, "Cannot create trees of HSR devices.\n");
		return -EINVAL;
	}

	if (hsr_port_exists(dev)) {
		netdev_info(dev, "This device is already an HSR slave.\n");
		return -EINVAL;
	}

	if (is_vlan_dev(dev)) {
		netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		netdev_info(dev, "This device does not support bridging.\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

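/* Prepare a physical device for use as an HSR slave: take a reference,
 * enable promiscuous mode and attach the HSR rx_handler.
 */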
static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
{
	int res;

	dev_hold(dev);
	/* HSR slaves must also receive ring traffic addressed to other nodes */
	res = dev_set_promiscuity(dev, 1);
	if (res)
		goto fail_promiscuity;

	/* Divert all frames received on the slave to hsr_handle_frame() */
	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	dev_set_promiscuity(dev, -1);
fail_promiscuity:
	dev_put(dev);

	return res;
}

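/* Create an hsr_port of the given type and attach it to the HSR instance.
 * For slave ports the underlying device is also set up (promiscuous mode,
 * rx_handler); the master's features and MTU are updated afterwards.
 */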
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type)
{
	struct hsr_port *port, *master;
	int res;

	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port != NULL)
		return -EBUSY;	/* A port of this type already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(dev, port);
		if (res)
			goto fail_dev_setup;
	}

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}

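/* Detach a port from its HSR instance, undoing hsr_add_port(). Safe to call
 * for both slave and master ports; only slaves have an rx_handler and a
 * promiscuity count to drop.
 */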
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		if (master != NULL) {
			netdev_update_features(master->dev);
			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		}
		netdev_rx_handler_unregister(port->dev);
		dev_set_promiscuity(port->dev, -1);
	}

	/* Wait until no reader can still see the port before dropping the
	 * device reference.
	 */
	synchronize_rcu();

	if (port != master)
		dev_put(port->dev);
}