1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include "cxgb4.h"
36#include "t4_regs.h"
37#include "l2t.h"
38#include "t4fw_api.h"
39#include "cxgb4_filter.h"
40
41static inline bool is_field_set(u32 val, u32 mask)
42{
43 return val || mask;
44}
45
46static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
47{
48 return !(conf & conf_mask) && is_field_set(val, mask);
49}
50
51
/* Validate a filter specification against the configuration programmed into
 * the card.  Returns 0 if the filter can be supported, -EOPNOTSUPP if it
 * uses match fields the card was not configured for, or -ERANGE for
 * out-of-range values.
 */
static int validate_filter(struct net_device *dev,
			   struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 fconf, iconf;

	/* Check for fields being used which weren't enabled in the
	 * card's filter/ingress configuration.
	 */
	fconf = adapter->params.tp.vlan_pri_map;
	iconf = adapter->params.tp.ingress_config;

	if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
	    unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
	    unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
	    unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
			fs->mask.ethtype) ||
	    unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
	    unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
			fs->mask.matchtype) ||
	    unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
	    unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
			fs->mask.pfvf_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
			fs->mask.ovlan_vld) ||
	    unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
		return -EOPNOTSUPP;

	/* The same hardware field carries either the outer-VLAN match or the
	 * PF/VF match, selected by VNIC_F in the ingress configuration, so a
	 * filter may use one of them but never both at once, and the one it
	 * uses must match the VNIC_F setting.  pf/vf values are also bounded
	 * by their hardware field widths (3 and 7 bits respectively); the
	 * masks are silently truncated to those widths below.
	 */
	if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	    is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
		return -EOPNOTSUPP;
	if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     (iconf & VNIC_F)))
		return -EOPNOTSUPP;
	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
		return -ERANGE;
	fs->mask.pf &= 0x7;
	fs->mask.vf &= 0x7f;

	/* If the user is requesting that the filter action loop matching
	 * packets back out one of our ports, make sure that the egress
	 * port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/* The ingress port to match on must also be a valid port number. */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* T4 chips don't support VLAN tag removal/rewrite on switch
	 * (loopback) filters.
	 */
	if (is_t4(adapter->params.chip) &&
	    fs->action == FILTER_SWITCH &&
	    (fs->newvlan == VLAN_REMOVE ||
	     fs->newvlan == VLAN_REWRITE))
		return -EOPNOTSUPP;

	return 0;
}
119
120static int get_filter_steerq(struct net_device *dev,
121 struct ch_filter_specification *fs)
122{
123 struct adapter *adapter = netdev2adap(dev);
124 int iq;
125
126
127
128
129
130
131 if (!fs->dirsteer) {
132 if (fs->iq)
133 return -EINVAL;
134 iq = 0;
135 } else {
136 struct port_info *pi = netdev_priv(dev);
137
138
139
140
141 if (fs->iq < pi->nqsets)
142 iq = adapter->sge.ethrxq[pi->first_qset +
143 fs->iq].rspq.abs_id;
144 else
145 iq = fs->iq;
146 }
147
148 return iq;
149}
150
151static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
152{
153 spin_lock_bh(&t->ftid_lock);
154
155 if (test_bit(fidx, t->ftid_bmap)) {
156 spin_unlock_bh(&t->ftid_lock);
157 return -EBUSY;
158 }
159
160 if (family == PF_INET)
161 __set_bit(fidx, t->ftid_bmap);
162 else
163 bitmap_allocate_region(t->ftid_bmap, fidx, 2);
164
165 spin_unlock_bh(&t->ftid_lock);
166 return 0;
167}
168
169static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
170{
171 spin_lock_bh(&t->ftid_lock);
172 if (family == PF_INET)
173 __clear_bit(fidx, t->ftid_bmap);
174 else
175 bitmap_release_region(t->ftid_bmap, fidx, 2);
176 spin_unlock_bh(&t->ftid_lock);
177}
178
179
180static int del_filter_wr(struct adapter *adapter, int fidx)
181{
182 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
183 struct fw_filter_wr *fwr;
184 struct sk_buff *skb;
185 unsigned int len;
186
187 len = sizeof(*fwr);
188
189 skb = alloc_skb(len, GFP_KERNEL);
190 if (!skb)
191 return -ENOMEM;
192
193 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
194 t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
195
196
197
198
199 f->pending = 1;
200 t4_mgmt_tx(adapter, skb);
201 return 0;
202}
203
204
205
206
207
208
209
/* Send a Work Request to write the filter at the specified index.  We
 * construct a Firmware Filter Work Request, put the filter into "pending"
 * mode (which prevents any further actions against it until the firmware
 * replies), and hand the request to the offload transmit path.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry
	 * for the filter first.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* Construct the work request.  Every value/mask pair from the
	 * internal filter specification is translated into the firmware's
	 * fixed field layout; multi-byte fields are converted to the
	 * firmware's big-endian wire format with htonl()/htons().
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(f->tid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * The pending flag is cleared when the Work Request Reply arrives
	 * (see filter_rpl()).
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
318
319
320int writable_filter(struct filter_entry *f)
321{
322 if (f->locked)
323 return -EPERM;
324 if (f->pending)
325 return -EBUSY;
326
327 return 0;
328}
329
330
331
332
333
334int delete_filter(struct adapter *adapter, unsigned int fidx)
335{
336 struct filter_entry *f;
337 int ret;
338
339 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
340 return -EINVAL;
341
342 f = &adapter->tids.ftid_tab[fidx];
343 ret = writable_filter(f);
344 if (ret)
345 return ret;
346 if (f->valid)
347 return del_filter_wr(adapter, fidx);
348
349 return 0;
350}
351
352
353
354
/* Free any resources held by a filter entry and reset the entry to a
 * completely clean state so its slot can be reused.
 */
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the filter has loopback rewriting rules it holds a Layer 2
	 * Table (L2T) entry; release our reference to it.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* Zeroing the entry clears the valid, pending and locked flags (and
	 * the l2t pointer released above), which is all the state-reset we
	 * need here.
	 */
	memset(f, 0, sizeof(*f));
}
372
373void clear_all_filters(struct adapter *adapter)
374{
375 unsigned int i;
376
377 if (adapter->tids.ftid_tab) {
378 struct filter_entry *f = &adapter->tids.ftid_tab[0];
379 unsigned int max_ftid = adapter->tids.nftids +
380 adapter->tids.nsftids;
381
382 for (i = 0; i < max_ftid; i++, f++)
383 if (f->valid || f->pending)
384 clear_filter(adapter, f);
385 }
386}
387
388
389static void fill_default_mask(struct ch_filter_specification *fs)
390{
391 unsigned int lip = 0, lip_mask = 0;
392 unsigned int fip = 0, fip_mask = 0;
393 unsigned int i;
394
395 if (fs->val.iport && !fs->mask.iport)
396 fs->mask.iport |= ~0;
397 if (fs->val.fcoe && !fs->mask.fcoe)
398 fs->mask.fcoe |= ~0;
399 if (fs->val.matchtype && !fs->mask.matchtype)
400 fs->mask.matchtype |= ~0;
401 if (fs->val.macidx && !fs->mask.macidx)
402 fs->mask.macidx |= ~0;
403 if (fs->val.ethtype && !fs->mask.ethtype)
404 fs->mask.ethtype |= ~0;
405 if (fs->val.ivlan && !fs->mask.ivlan)
406 fs->mask.ivlan |= ~0;
407 if (fs->val.ovlan && !fs->mask.ovlan)
408 fs->mask.ovlan |= ~0;
409 if (fs->val.frag && !fs->mask.frag)
410 fs->mask.frag |= ~0;
411 if (fs->val.tos && !fs->mask.tos)
412 fs->mask.tos |= ~0;
413 if (fs->val.proto && !fs->mask.proto)
414 fs->mask.proto |= ~0;
415
416 for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
417 lip |= fs->val.lip[i];
418 lip_mask |= fs->mask.lip[i];
419 fip |= fs->val.fip[i];
420 fip_mask |= fs->mask.fip[i];
421 }
422
423 if (lip && !lip_mask)
424 memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
425
426 if (fip && !fip_mask)
427 memset(fs->mask.fip, ~0, sizeof(fs->mask.lip));
428
429 if (fs->val.lport && !fs->mask.lport)
430 fs->mask.lport = ~0;
431 if (fs->val.fport && !fs->mask.fport)
432 fs->mask.fport = ~0;
433}
434
435
436
437
438
439
440
/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  Any provided filter operation context is attached to the
 * filter entry so completion of the (asynchronous) operation can be
 * signalled from the firmware-reply handler.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int max_fidx, fidx;
	struct filter_entry *f;
	u32 iconf;
	int iq, ret;

	/* The index must lie in the normal filter region, or be exactly the
	 * last slot of the server filter region.
	 */
	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	/* IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries; IPv4 filters occupy a single slot with no alignment
	 * requirement.  Enforce those slot-occupancy rules here.
	 */
	if (fs->type == 0) { /* IPv4 */
		/* If the requested slot is not itself a multiple of four,
		 * reject it when a live IPv6 filter occupies the four-slot
		 * region that this slot falls into.
		 */
		fidx = filter_id & ~0x3;
		if (fidx != filter_id &&
		    adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		/* Ensure that the IPv6 filter is aligned on a
		 * four-slot boundary.
		 */
		if (filter_id & 0x3) {
			dev_err(adapter->pdev_dev,
				"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
			return -EINVAL;
		}

		/* Check that all three following slots are free. */
		for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
					fidx);
				return -EINVAL;
			}
		}
	}

	/* Make sure the requested slot itself is not already occupied by a
	 * live filter.
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	/* Claim the slot (or 4-slot region for IPv6) in the ftid bitmap. */
	fidx = filter_id + adapter->tids.ftid_base;
	ret = cxgb4_set_ftid(&adapter->tids, filter_id,
			     fs->type ? PF_INET6 : PF_INET);
	if (ret)
		return ret;

	/* Make sure the filter entry is writable (not locked, no operation
	 * pending); on failure roll back the bitmap claim above.
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET);
		return ret;
	}

	/* Clear out any old resources being used by the filter before we
	 * start constructing the new one.
	 * NOTE(review): f->valid was already checked above and returned
	 * -EBUSY, so this appears to be defensive/unreachable — confirm.
	 */
	if (f->valid)
		clear_filter(adapter, f);

	/* Convert the filter specification into our internal format,
	 * recording the resolved steering queue.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	/* When the hardware's VNIC mode is enabled, the PF/VF specification
	 * is carried in the Outer VLAN field, so translate it here; the rest
	 * of the code (including the firmware interface) then doesn't have
	 * to repeat these checks.
	 */
	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/* Attempt to set the filter.  If we don't succeed, roll back both
	 * the bitmap claim and the filter entry state.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* save the absolute tid (index + ftid_base) */
	ret = set_filter_wr(adapter, filter_id);
	if (ret) {
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET);
		clear_filter(adapter, f);
	}

	return ret;
}
571
572
573
574
575
576
/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  Any provided filter
 * operation context is attached to the filter entry so the firmware-reply
 * handler can signal completion; if there is nothing to delete, completion
 * is signalled immediately.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct filter_entry *f;
	unsigned int max_fidx;
	int ret;

	/* The index must lie in the normal filter region, or be exactly the
	 * last slot of the server filter region.
	 */
	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* A live filter: release its bitmap slot(s) and issue the delete
	 * Work Request; completion arrives asynchronously via filter_rpl().
	 */
	if (f->valid) {
		f->ctx = ctx;
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 f->fs.type ? PF_INET6 : PF_INET);
		return del_filter_wr(adapter, filter_id);
	}

	/* If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
	return ret;
}
612
613int cxgb4_set_filter(struct net_device *dev, int filter_id,
614 struct ch_filter_specification *fs)
615{
616 struct filter_ctx ctx;
617 int ret;
618
619 init_completion(&ctx.completion);
620
621 ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
622 if (ret)
623 goto out;
624
625
626 ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
627 if (!ret)
628 return -ETIMEDOUT;
629
630 ret = ctx.result;
631out:
632 return ret;
633}
634
635int cxgb4_del_filter(struct net_device *dev, int filter_id)
636{
637 struct filter_ctx ctx;
638 int ret;
639
640 init_completion(&ctx.completion);
641
642 ret = __cxgb4_del_filter(dev, filter_id, &ctx);
643 if (ret)
644 goto out;
645
646
647 ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
648 if (!ret)
649 return -ETIMEDOUT;
650
651 ret = ctx.result;
652out:
653 return ret;
654}
655
656
/* Handle a filter write/deletion reply from the firmware: update the filter
 * entry's state according to the reply cookie and signal any waiter that
 * attached a completion context to the operation.
 */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	struct filter_entry *f = NULL;
	unsigned int max_fidx;
	int idx;

	max_fidx = adap->tids.nftids + adap->tids.nsftids;

	/* Map the reply's tid back to a filter-table entry; ignore replies
	 * that fall outside the table or whose tid doesn't match the entry
	 * (a stale/mismatched reply).
	 */
	if (adap->tids.ftid_tab) {
		/* convert absolute tid to a table index */
		idx = tid - adap->tids.ftid_base;
		if (idx >= max_fidx)
			return;
		f = &adap->tids.ftid_tab[idx];
		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid. */
	if (f) {
		unsigned int ret = TCB_COOKIE_G(rpl->cookie);
		struct filter_ctx *ctx;

		/* Detach the completion context from the entry before
		 * touching any other state, so it is signalled exactly once.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* The firmware confirmed the delete: release the
			 * entry's resources and report success.
			 */
			clear_filter(adap, f);
			if (ctx)
				ctx->result = 0;
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			/* Insert failed because the Source MAC Table is
			 * full; tear the entry down and report -ENOMEM.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -ENOMEM;
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			/* Insert succeeded: record the SMT index the
			 * firmware chose and mark the filter live.
			 */
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;
			f->valid = 1;
			if (ctx) {
				ctx->result = 0;
				ctx->tid = idx;
			}
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -EINVAL;
		}
		if (ctx)
			complete(&ctx->completion);
	}
}
722