/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

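/* Resolve the key/mask pair for a given dissector key type. The macro
 * declares local variables, so it may only be used at the start of the
 * flow_rule_match_*() helpers below.
 */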
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

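/* The action cookie carries a variable-length payload; header and data
 * are allocated in one chunk and the caller's blob is copied behind the
 * header.
 */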
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

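/* Allocate a block callback. @cb_ident identifies it for later lookups;
 * @release, if non-NULL, is called on @cb_priv when the callback is
 * freed via flow_block_cb_free().
 */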
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

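/* The block callback refcount is a plain integer; callers are expected
 * to provide their own serialization (these helpers run from block
 * setup paths that are already serialized, e.g. under RTNL).
 */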
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

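/* Convenience wrapper for drivers that attach a single callback per
 * block: it handles FLOW_BLOCK_BIND/UNBIND, rejects double binds via
 * flow_block_cb_is_busy() and, when @ingress_only is true, refuses
 * anything but clsact ingress. A minimal sketch of a driver's
 * ndo_setup_tc() TC_SETUP_BLOCK case (the foo_* names are hypothetical):
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	case TC_SETUP_BLOCK:
 *		return flow_block_cb_setup_simple(type_data,
 *						  &foo_block_cb_list,
 *						  foo_setup_tc_block_cb,
 *						  priv, priv, true);
 */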
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

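/* Indirect block offload lets drivers that do not own a netdevice (e.g.
 * for tunnel devices) still receive block bind/unbind events.
 * flow_block_indr_dev_list holds the registered drivers and
 * flow_block_indr_list the block callbacks installed through them; both
 * lists are protected by flow_indr_block_lock.
 */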
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
	struct rcu_head			rcu;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

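/* Drop one reference on the (cb, cb_priv) registration. On the last
 * reference the entry is unlinked and every block callback installed
 * through it is moved to a private list, so the cleanup callbacks can
 * run after flow_indr_block_lock has been released.
 */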
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch,
				 void *data, void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

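/* Like flow_block_cb_alloc(), but additionally records the indirect
 * block state and links the callback into flow_block_indr_list so it
 * can be torn down from flow_indr_dev_unregister().
 */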
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

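/* Offer @bo to every registered indirect block driver. Each driver's
 * callback decides whether the (dev, sch) pair is of interest and, if
 * so, adds its flow_block_cb to bo->cb_list; an empty list afterwards
 * means no driver claimed the block.
 */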
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);