// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/l3mdev/l3mdev.c - L3 master device implementation
 * Copyright (c) 2015 Cumulus Networks
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/netdevice.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

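/* One lookup handler may be registered per l3mdev type (e.g. VRF).
 * l3mdev_lock serializes registration, unregistration and use of the
 * l3mdev_handlers[] table below.
 */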
static DEFINE_SPINLOCK(l3mdev_lock);

struct l3mdev_handler {
	lookup_by_table_id_t dev_lookup;
};

static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];

static int l3mdev_check_type(enum l3mdev_type l3type)
{
	if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
		return -EINVAL;

	return 0;
}

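/**
 * l3mdev_table_lookup_register - register a FIB table id to device index
 *				  lookup helper for an l3mdev type
 * @l3type: l3mdev type the helper is registered for
 * @fn: lookup function mapping a table id to a device index
 *
 * Returns -EINVAL for an invalid type and -EBUSY if a helper is already
 * registered for @l3type.
 */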
int l3mdev_table_lookup_register(enum l3mdev_type l3type,
				 lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup) {
		res = -EBUSY;
		goto unlock;
	}

	hdlr->dev_lookup = fn;
	res = 0;

unlock:
	spin_unlock(&l3mdev_lock);

	return res;
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);

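/**
 * l3mdev_table_lookup_unregister - remove a previously registered lookup
 *				    helper for an l3mdev type
 * @l3type: l3mdev type the helper was registered for
 * @fn: lookup function to remove; only cleared if it is the one currently
 *	registered
 */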
void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
				    lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;

	if (l3mdev_check_type(l3type))
		return;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup == fn)
		hdlr->dev_lookup = NULL;

	spin_unlock(&l3mdev_lock);
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);

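/**
 * l3mdev_ifindex_lookup_by_table_id - find the l3mdev bound to a FIB table
 * @l3type: l3mdev type of the device being looked up
 * @net: network namespace to search
 * @table_id: FIB table id owned by the device
 *
 * Returns the device index reported by the registered helper, or -EINVAL
 * if the type is invalid or no helper is registered for it.
 */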
int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
				      struct net *net, u32 table_id)
{
	lookup_by_table_id_t lookup;
	struct l3mdev_handler *hdlr;
	int ifindex = -EINVAL;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	lookup = hdlr->dev_lookup;
	if (!lookup)
		goto unlock;

	ifindex = lookup(net, table_id);

unlock:
	spin_unlock(&l3mdev_lock);

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);

/**
 * l3mdev_master_ifindex_rcu - get index of L3 master device
 * @dev: targeted interface
 */

int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
	int ifindex = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		ifindex = dev->ifindex;
	} else if (netif_is_l3_slave(dev)) {
		struct net_device *master;
		struct net_device *_dev = (struct net_device *)dev;

		/* netdev_master_upper_dev_get_rcu() takes a non-const
		 * struct net_device *. We only read the upper dev list to
		 * find the master device, so cast away the const rather
		 * than change the callers.
		 */
		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master)
			ifindex = master->ifindex;
	}

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);

/**
 * l3mdev_master_upper_ifindex_by_index_rcu - get index of the upper L3
 *					      master device for a given
 *					      device index
 * @net: network namespace for the device index lookup
 * @ifindex: targeted interface
 */
int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, ifindex);
	while (dev && !netif_is_l3_master(dev))
		dev = netdev_master_upper_dev_get(dev);

	return dev ? dev->ifindex : 0;
}
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);

/**
 * l3mdev_fib_table_rcu - get FIB table id associated with an L3
 *			  master interface
 * @dev: targeted interface
 */

u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
	u32 tb_id = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		if (dev->l3mdev_ops->l3mdev_fib_table)
			tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
	} else if (netif_is_l3_slave(dev)) {
		/* netdev_master_upper_dev_get_rcu() needs a non-const
		 * net_device, but this helper takes a const one, so cast
		 * away the const for the upper-dev walk only.
		 */
		struct net_device *_dev = (struct net_device *)dev;
		const struct net_device *master;

		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master &&
		    master->l3mdev_ops->l3mdev_fib_table)
			tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
	}

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);

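/**
 * l3mdev_fib_table_by_index - get FIB table id for the device with the
 *			       given index
 * @net: network namespace for the device index lookup
 * @ifindex: targeted interface; 0 means "no device" and returns 0
 */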
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	u32 tb_id = 0;

	if (!ifindex)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		tb_id = l3mdev_fib_table_rcu(dev);

	rcu_read_unlock();

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);

/**
 * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
 *			      local and multicast addresses
 * @net: network namespace for device index lookup
 * @fl6: IPv6 flow struct for lookup
 *
 * Caller must hold rcu_read_lock().
 */

struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
					   struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;
	struct net_device *dev;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (fl6->flowi6_oif) {
		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);

		if (dev && netif_is_l3_master(dev) &&
		    dev->l3mdev_ops->l3mdev_link_scope_lookup)
			dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
	}

	return dst;
}
EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);

/**
 * l3mdev_fib_rule_match - Determine if flowi references an
 *			   L3 master device
 * @net: network namespace for device index lookup
 * @fl:  flow struct
 * @arg: store the table the rule matched with here
 *
 * Returns 1 and sets @arg->table if either the output or input device
 * in @fl is an L3 master device, 0 otherwise.
 */
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
			  struct fib_lookup_arg *arg)
{
	struct net_device *dev;
	int rc = 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, fl->flowi_oif);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
		goto out;
	}

	dev = dev_get_by_index_rcu(net, fl->flowi_iif);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
		goto out;
	}

out:
	rcu_read_unlock();

	return rc;
}

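/**
 * l3mdev_update_flow - re-target a flow at its L3 master device
 * @net: network namespace for device index lookup
 * @fl: flow struct to update
 *
 * If the device referenced by flowi_oif or flowi_iif has an L3 master
 * device (or is one itself), rewrite the index to that master and set
 * FLOWI_FLAG_SKIP_NH_OIF for the subsequent FIB lookup.
 */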
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
	struct net_device *dev;
	int ifindex;

	rcu_read_lock();

	if (fl->flowi_oif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_oif);
		if (dev) {
			ifindex = l3mdev_master_ifindex_rcu(dev);
			if (ifindex) {
				fl->flowi_oif = ifindex;
				fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
				goto out;
			}
		}
	}

	if (fl->flowi_iif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_iif);
		if (dev) {
			ifindex = l3mdev_master_ifindex_rcu(dev);
			if (ifindex) {
				fl->flowi_iif = ifindex;
				fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
			}
		}
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(l3mdev_update_flow);