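/*
 * Intel Wireless WiMAX Connection 2400m
 * Handle incoming traffic and deliver it to the control or data planes
 *
 * The bus-generic driver hands received messages to i2400m_rx(); each
 * message carries a header, a table of payload descriptors and the
 * payloads themselves. Payloads are dispatched by type to the network
 * stack (data / extended data), the control plane or the trace
 * interface. Extended data payloads may additionally be reordered
 * before delivery using per-queue reorder queues (struct i2400m_roq).
 */
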
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include "i2400m.h"


#define D_SUBMODULE rx
#include "debug-levels.h"

static int i2400m_rx_reorder_disabled;
module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
MODULE_PARM_DESC(rx_reorder_disabled,
		 "If true, RX reordering will be disabled.");

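/*
 * Arguments for a deferred call to i2400m_report_hook(); queued in
 * i2400m->rx_reports and processed by i2400m_report_hook_work(). A
 * reference to the RX skb is held so the l3l4 header stays valid.
 */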
struct i2400m_report_hook_args {
	struct sk_buff *skb_rx;
	const struct i2400m_l3l4_hdr *l3l4_hdr;
	size_t size;
	struct list_head list_node;
};
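
/*
 * Process queued device reports
 *
 * Workqueue function; splices i2400m->rx_reports (under rx_lock) into a
 * local list, calls i2400m_report_hook() on each entry and then drops
 * the skb reference taken when the report was queued. Loops until the
 * report list is found empty.
 */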
void i2400m_report_hook_work(struct work_struct *ws)
{
	struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_report_hook_args *args, *args_next;
	LIST_HEAD(list);
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		list_splice_init(&i2400m->rx_reports, &list);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		if (list_empty(&list))
			break;
		else
			d_printf(1, dev, "processing queued reports\n");
		list_for_each_entry_safe(args, args_next, &list, list_node) {
			d_printf(2, dev, "processing queued report %p\n", args);
			i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
			kfree_skb(args->skb_rx);
			list_del(&args->list_node);
			kfree(args);
		}
	}
}
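
/*
 * Flush the list of queued reports
 *
 * Frees every queued report without processing it; used when releasing
 * the RX path so no stale work is left behind.
 */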
static
void i2400m_report_hook_flush(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_report_hook_args *args, *args_next;
	LIST_HEAD(list);
	unsigned long flags;

	d_printf(1, dev, "flushing queued reports\n");
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	list_splice_init(&i2400m->rx_reports, &list);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	list_for_each_entry_safe(args, args_next, &list, list_node) {
		d_printf(2, dev, "flushing queued report %p\n", args);
		kfree_skb(args->skb_rx);
		list_del(&args->list_node);
		kfree(args);
	}
}
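
/*
 * Queue a report for later processing
 *
 * Takes a reference on the RX skb (so the l3l4 header it points to
 * stays valid), appends the report to i2400m->rx_reports and, if the
 * device is marked ready, schedules i2400m_report_hook_work() on the
 * driver's workqueue.
 */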
static
void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
			      const void *l3l4_hdr, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;
	struct i2400m_report_hook_args *args;

	args = kzalloc(sizeof(*args), GFP_NOIO);
	if (args) {
		args->skb_rx = skb_get(skb_rx);
		args->l3l4_hdr = l3l4_hdr;
		args->size = size;
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		list_add_tail(&args->list_node, &i2400m->rx_reports);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		d_printf(2, dev, "queued report %p\n", args);
		rmb();
		if (likely(i2400m->ready))
			queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
	} else {
		if (printk_ratelimit())
			dev_err(dev, "%s:%u: Can't allocate %zu B\n",
				__func__, __LINE__, sizeof(*args));
	}
}
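
/*
 * Deliver an ACK to a command/get/set to whoever is waiting for it
 *
 * Under i2400m->rx_lock, verify there is a waiter (i2400m->ack_skb ==
 * ERR_PTR(-EINPROGRESS)), copy the payload into a wimax message skb,
 * hand it over through i2400m->ack_skb and wake the waiter via
 * msg_completion. If the waiter vanished in between, the skb is freed;
 * if the allocation failed, the error pointer is handed to the waiter
 * instead.
 */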
static
void i2400m_rx_ctl_ack(struct i2400m *i2400m,
		       const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	unsigned long flags;
	struct sk_buff *ack_skb;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		dev_err(dev, "Huh? reply to command with no waiters\n");
		goto error_no_waiter;
	}
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
		goto error_waiter_cancelled;
	}
	if (IS_ERR(ack_skb))
		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
	i2400m->ack_skb = ack_skb;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	complete(&i2400m->msg_completion);
	return;

error_waiter_cancelled:
	if (!IS_ERR(ack_skb))
		kfree_skb(ack_skb);
error_no_waiter:
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
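
/*
 * Receive and process a control payload
 *
 * Sanity-check the L3/L4 header, then dispatch: reports (messages with
 * I2400M_MT_REPORT_MASK set) are queued for the driver's report hook
 * and also forwarded to user space through the wimax message pipe;
 * anything else is treated as the ACK to a pending command and handed
 * to i2400m_rx_ctl_ack().
 */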
static
void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (msg_type & I2400M_MT_REPORT_MASK) {
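		/*
		 * Reports are processed in the driver's workqueue (via
		 * i2400m_report_hook_queue()) and also relayed to user
		 * space; when tracing of user space messages is enabled,
		 * the report is echoed on the "echo" pipe as well.
		 */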
		i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
		if (unlikely(i2400m->trace_msg_from_user))
			wimax_msg(&i2400m->wimax_dev, "echo",
				  l3l4_hdr, size, GFP_KERNEL);
		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
				   GFP_KERNEL);
		if (result < 0)
			dev_err(dev, "error sending report to userspace: %d\n",
				result);
	} else
		i2400m_rx_ctl_ack(i2400m, payload, size);
error_check:
	return;
}
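
/*
 * Receive and send a trace payload up to user space
 *
 * Sanity-check the L3/L4 header and forward the message on the "trace"
 * wimax message pipe so user space debugging tools can pick it up.
 */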
static
void i2400m_rx_trace(struct i2400m *i2400m,
		     const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
	if (result < 0)
		dev_err(dev, "error sending trace to userspace: %d\n",
			result);
error_check:
	return;
}
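
/*
 * Reorder queue data stashed in skb->cb while a packet sits in a
 * reorder queue
 *
 * @sn: sequence number assigned by the device to this packet
 * @cs: packet type (enum i2400m_cs), needed by i2400m_net_erx() when
 *	the packet is finally delivered to the network stack
 */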
struct i2400m_roq_data {
	unsigned sn;
	enum i2400m_cs cs;
};
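
/*
 * A reorder queue
 *
 * @ws: window start; sequence number where the reorder window starts
 * @queue: skbs queued out of order, kept sorted by normalized sequence
 *	number (see __i2400m_roq_nsn())
 * @log: circular log of operations on this queue, dumped when an
 *	inconsistency is detected
 */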
struct i2400m_roq
{
	unsigned ws;
	struct sk_buff_head queue;
	struct i2400m_roq_log *log;
};


static
void __i2400m_roq_init(struct i2400m_roq *roq)
{
	roq->ws = 0;
	skb_queue_head_init(&roq->queue);
}


static
unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
		/ sizeof(*roq);
}
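
/*
 * Normalize a sequence number relative to the queue's window start
 *
 * Sequence numbers wrap at 2048, so the distance from the window start
 * is computed modulo 2048; the result is the packet's position inside
 * (or past) the reorder window.
 */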
static
unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
{
	int r;
	r = ((int) sn - (int) roq->ws) % 2048;
	if (r < 0)
		r += 2048;
	return r;
}
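
/*
 * Reorder queue operation log
 *
 * Each queue keeps a small circular log (I2400M_ROQ_LOG_LENGTH entries)
 * of the operations performed on it (reset, queue, window start update,
 * queue plus window start update). When a broken sequence number is
 * detected the whole log is dumped to help debugging.
 */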
enum {
	I2400M_ROQ_LOG_LENGTH = 32,
};

struct i2400m_roq_log {
	struct i2400m_roq_log_entry {
		enum i2400m_ro_type type;
		unsigned ws, count, sn, nsn, new_ws;
	} entry[I2400M_ROQ_LOG_LENGTH];
	unsigned in, out;
};


static
void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
				unsigned e_index,
				struct i2400m_roq_log_entry *e)
{
	struct device *dev = i2400m_dev(i2400m);

	switch (e->type) {
	case I2400M_RO_TYPE_RESET:
		dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET:
		dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
			index, e->ws, e->count, e->sn, e->nsn);
		break;
	case I2400M_RO_TYPE_WS:
		dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET_WS:
		dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	default:
		dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
			index, e_index, e->type);
		break;
	}
}
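
/*
 * Add an entry to a queue's operation log, dropping the oldest entry if
 * the log is full; the entry is printed right away when debugging is
 * enabled at level 1.
 */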
static
void i2400m_roq_log_add(struct i2400m *i2400m,
			struct i2400m_roq *roq, enum i2400m_ro_type type,
			unsigned ws, unsigned count, unsigned sn,
			unsigned nsn, unsigned new_ws)
{
	struct i2400m_roq_log_entry *e;
	unsigned cnt_idx;
	int index = __i2400m_roq_index(i2400m, roq);

	if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
		roq->log->out++;
	cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
	e = &roq->log->entry[cnt_idx];

	e->type = type;
	e->ws = ws;
	e->count = count;
	e->sn = sn;
	e->nsn = nsn;
	e->new_ws = new_ws;

	if (d_test(1))
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
}
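
/* Dump a queue's whole operation log and then empty it */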
static
void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	unsigned cnt, cnt_idx;
	struct i2400m_roq_log_entry *e;
	int index = __i2400m_roq_index(i2400m, roq);

	BUG_ON(roq->log->out > roq->log->in);
	for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
		cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
		e = &roq->log->entry[cnt_idx];
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
		memset(e, 0, sizeof(*e));
	}
	roq->log->in = roq->log->out = 0;
}
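
/*
 * Backend for queueing an skb in a reorder queue, sorted by nsn
 *
 * The skb is inserted so the queue stays ordered by normalized sequence
 * number: an empty queue or an nsn not smaller than the current tail's
 * takes the fast path; otherwise the queue is walked to find the
 * insertion point. Failure to insert indicates a driver bug, so the
 * queue contents are dumped before calling BUG().
 */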
static
void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
			struct sk_buff *skb, unsigned sn, unsigned nsn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr;
	struct i2400m_roq_data *roq_data_itr, *roq_data;
	unsigned nsn_itr;

	d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
		  i2400m, roq, skb, sn, nsn);

	roq_data = (struct i2400m_roq_data *) &skb->cb;
	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
	roq_data->sn = sn;
	d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
		 roq, roq->ws, nsn, roq_data->sn);

	/* Queue empty? This one is the first */
	if (skb_queue_empty(&roq->queue)) {
		d_printf(2, dev, "ERX: roq %p - first one\n", roq);
		__skb_queue_head(&roq->queue, skb);
		goto out;
	}

	/* Not smaller than the tail's nsn? Append at the end */
	skb_itr = skb_peek_tail(&roq->queue);
	roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
	nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);

	if (nsn >= nsn_itr) {
		d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
			 roq, skb_itr, nsn_itr, roq_data_itr->sn);
		__skb_queue_tail(&roq->queue, skb);
		goto out;
	}
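
	/*
	 * Not the tail: walk the queue and insert the skb right before
	 * the first entry with a larger normalized sequence number.
	 */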
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);

		if (nsn_itr > nsn) {
			d_printf(2, dev, "ERX: roq %p - queued before %p "
				 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
				 roq_data_itr->sn);
			__skb_queue_before(&roq->queue, skb_itr, skb);
			goto out;
		}
	}

	/* Should never get here -- dump the queue and die */
	dev_err(dev, "SW BUG? failed to insert packet\n");
	dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
		roq, roq->ws, skb, nsn, roq_data->sn);
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);

		dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
			roq, skb_itr, nsn_itr, roq_data_itr->sn);
	}
	BUG();
out:
	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
		i2400m, roq, skb, sn, nsn);
}
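
/*
 * Backend for updating a queue's window start
 *
 * Every queued skb whose normalized sequence number falls below the new
 * window start is unlinked and delivered to the network stack with
 * i2400m_net_erx(); since the queue is sorted, the walk stops at the
 * first skb that is still inside the new window. Returns the normalized
 * sequence number of the new window start (relative to the old one).
 */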
static
unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
				unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr, *tmp_itr;
	struct i2400m_roq_data *roq_data_itr;
	unsigned new_nws, nsn_itr;

	new_nws = __i2400m_roq_nsn(roq, sn);

	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);

		if (nsn_itr < new_nws) {
			d_printf(2, dev, "ERX: roq %p - release skb %p "
				 "(nsn %u/%u new nws %u)\n",
				 roq, skb_itr, nsn_itr, roq_data_itr->sn,
				 new_nws);
			__skb_unlink(skb_itr, &roq->queue);
			i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
		} else
			break;
	}
	roq->ws = sn;
	return new_nws;
}
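
/*
 * Reset a reorder queue
 *
 * Deliver whatever is queued to the network stack and reset the window
 * start to zero; the operation is recorded in the queue's log.
 */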
static
void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr, *tmp_itr;
	struct i2400m_roq_data *roq_data_itr;

	d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
			   roq->ws, skb_queue_len(&roq->queue),
			   ~0, ~0, 0);
	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
			 roq, skb_itr, roq_data_itr->sn);
		__skb_unlink(skb_itr, &roq->queue);
		i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
	}
	roq->ws = 0;
	d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
}
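
/*
 * Queue a packet in its reorder queue
 *
 * The packet's sequence number (lbn) is normalized against the window
 * start; a normalized value of 1024 or more cannot come from a sane
 * device, so it is treated as a bug: the queue log is dumped and a warm
 * reset of the device is requested. Otherwise the packet is inserted in
 * order and the operation logged.
 */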
static
void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
		      struct sk_buff *skb, unsigned lbn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned nsn, len;

	d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u)\n",
		  i2400m, roq, skb, lbn);
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_nsn(roq, lbn);
	if (unlikely(nsn >= 1024)) {
		dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
			nsn, lbn, roq->ws);
		i2400m_roq_log_dump(i2400m, roq);
		i2400m_reset(i2400m, I2400M_RT_WARM);
	} else {
		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
				   roq->ws, len, lbn, nsn, ~0);
	}
	d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
		i2400m, roq, skb, lbn);
}
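
/*
 * Update the window start of a reorder queue, releasing to the network
 * stack every queued packet the new window start leaves behind; the
 * operation is recorded in the queue's log.
 */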
static
void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
			  unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned old_ws, nsn, len;

	d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
	old_ws = roq->ws;
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
			   old_ws, len, sn, nsn, roq->ws);
	d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
}
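
/*
 * Queue a packet and update the window start in one operation
 *
 * If the queue is empty the packet would be queued and immediately
 * released again, so it is delivered straight to the network stack;
 * otherwise it is inserted in order. In both cases the window start is
 * then advanced past the packet's sequence number (sn + 1) and the
 * operation logged.
 */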
static
void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
				struct sk_buff *skb, unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned nsn, old_ws, len;

	d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
		  i2400m, roq, skb, sn);
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_nsn(roq, sn);
	old_ws = roq->ws;

	if (len == 0) {
		struct i2400m_roq_data *roq_data;
		roq_data = (struct i2400m_roq_data *) &skb->cb;
		i2400m_net_erx(i2400m, skb, roq_data->cs);
	} else
		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);

	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
			   old_ws, len, sn, nsn, roq->ws);

	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
		i2400m, roq, skb, sn);
}
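
/*
 * Release the reorder queues (kref release callback)
 *
 * Purges every queue, frees the shared log array (allocated as one
 * block, hence freeing rx_roq[0].log) and the queue array itself.
 */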
static void i2400m_rx_roq_destroy(struct kref *ref)
{
	unsigned itr;
	struct i2400m *i2400m
		= container_of(ref, struct i2400m, rx_roq_refcount);
	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
	kfree(i2400m->rx_roq[0].log);
	kfree(i2400m->rx_roq);
	i2400m->rx_roq = NULL;
}
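
/*
 * Receive and send up an extended data packet
 *
 * Extended data payloads carry a struct i2400m_pl_edata_hdr in front of
 * the actual packet data; it contains the packet type (cs) and a 32-bit
 * reorder word. If this payload is the only (or last) one in the RX
 * message the skb is reused (skb_get()), otherwise it is cloned; the
 * skb is then clipped so it covers just this payload's packet data.
 *
 * When the reorder word flags reordering as needed, the reorder type,
 * queue index (cin) and sequence number are extracted from it and the
 * corresponding operation (reset, queue, update window start, or queue
 * plus update) is run on the selected reorder queue; otherwise the
 * packet goes straight to i2400m_net_erx(). A reference on
 * rx_roq_refcount is held across the reorder operation so the queues
 * cannot be torn down underneath it.
 */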
static
void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
		     unsigned single_last, const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_pl_edata_hdr *hdr = payload;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct sk_buff *skb;
	enum i2400m_cs cs;
	u32 reorder;
	unsigned ro_needed, ro_type, ro_cin, ro_sn;
	struct i2400m_roq *roq;
	struct i2400m_roq_data *roq_data;
	unsigned long flags;

	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));

	d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
		  "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
	if (size < sizeof(*hdr)) {
		dev_err(dev, "ERX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", size, sizeof(*hdr));
		goto error;
	}

	if (single_last) {
		skb = skb_get(skb_rx);
		d_printf(3, dev, "ERX: skb %p reusing\n", skb);
	} else {
		skb = skb_clone(skb_rx, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "ERX: no memory to clone skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_clone;
		}
		d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
	}
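
	/*
	 * Clip the skb so it covers just this payload's packet data:
	 * pull off everything up to and including the edata header and
	 * trim off the tail that belongs to other payloads.
	 */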
	skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
	skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));

	reorder = le32_to_cpu(hdr->reorder);
	ro_needed = reorder & I2400M_RO_NEEDED;
	cs = hdr->cs;
	if (ro_needed) {
		ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;

		spin_lock_irqsave(&i2400m->rx_lock, flags);
		if (i2400m->rx_roq == NULL) {
			kfree_skb(skb);	/* rx_roq is already destroyed */
			spin_unlock_irqrestore(&i2400m->rx_lock, flags);
			goto error;
		}
		roq = &i2400m->rx_roq[ro_cin];
		kref_get(&i2400m->rx_roq_refcount);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);

		roq_data = (struct i2400m_roq_data *) &skb->cb;
		roq_data->sn = ro_sn;
		roq_data->cs = cs;
		d_printf(2, dev, "ERX: reorder needed: "
			 "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
			 ro_type, ro_cin, roq->ws, ro_sn,
			 __i2400m_roq_nsn(roq, ro_sn), size);
		d_dump(2, dev, payload, size);
		switch (ro_type) {
		case I2400M_RO_TYPE_RESET:
			i2400m_roq_reset(i2400m, roq);
			kfree_skb(skb);	/* no data here */
			break;
		case I2400M_RO_TYPE_PACKET:
			i2400m_roq_queue(i2400m, roq, skb, ro_sn);
			break;
		case I2400M_RO_TYPE_WS:
			i2400m_roq_update_ws(i2400m, roq, ro_sn);
			kfree_skb(skb);	/* no data here */
			break;
		case I2400M_RO_TYPE_PACKET_WS:
			i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
			break;
		default:
			dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
		}

		spin_lock_irqsave(&i2400m->rx_lock, flags);
		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	} else
		i2400m_net_erx(i2400m, skb, cs);
error_skb_clone:
error:
	d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
		"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
}
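
/*
 * Dispatch a single payload to its handler based on the type from its
 * descriptor: data, control, trace or extended data. Unknown types are
 * reported (rate limited) and dropped.
 */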
static
void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
		       unsigned single_last, const struct i2400m_pld *pld,
		       const void *payload)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
		 pl_type, pl_size);
	d_dump(8, dev, payload, pl_size);

	switch (pl_type) {
	case I2400M_PT_DATA:
		d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
		i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
		break;
	case I2400M_PT_CTRL:
		i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
		break;
	case I2400M_PT_TRACE:
		i2400m_rx_trace(i2400m, payload, pl_size);
		break;
	case I2400M_PT_EDATA:
		d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
		i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
		break;
	default:
		if (printk_ratelimit())
			dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
				pl_type);
	}
}
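
/*
 * Sanity-check an RX message header: minimum size, the D2H barker, a
 * non-zero payload count and a count not exceeding
 * I2400M_MAX_PLS_IN_MSG. Returns 0 if the header looks usable, -EIO
 * otherwise.
 */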
static
int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
			    const struct i2400m_msg_hdr *msg_hdr,
			    size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);

	if (buf_size < sizeof(*msg_hdr)) {
		dev_err(dev, "RX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
		goto error;
	}
	if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
		dev_err(dev, "RX: HW BUG? message received with unknown "
			"barker 0x%08x (buf_size %zu bytes)\n",
			le32_to_cpu(msg_hdr->barker), buf_size);
		goto error;
	}
	if (msg_hdr->num_pls == 0) {
		dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
		goto error;
	}
	if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
		dev_err(dev, "RX: HW BUG? message contains more payload "
			"than maximum; ignoring.\n");
		goto error;
	}
	result = 0;
error:
	return result;
}
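
/*
 * Sanity-check a payload descriptor: the payload must fit within the
 * bus's maximum payload size, must not run past the end of the received
 * buffer and must carry a legal payload type. Returns 0 on success,
 * -EIO otherwise.
 */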
static
int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
			     const struct i2400m_pld *pld,
			     size_t pl_itr, size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	if (pl_size > i2400m->bus_pl_size_max) {
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
			"bigger than maximum %zu; ignoring message\n",
			pl_itr, pl_size, i2400m->bus_pl_size_max);
		goto error;
	}
	if (pl_itr + pl_size > buf_size) {
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
			"goes beyond the received buffer "
			"size (%zu bytes); ignoring message\n",
			pl_itr, pl_size, buf_size);
		goto error;
	}
	if (pl_type >= I2400M_PT_ILLEGAL) {
		dev_err(dev, "RX: HW BUG? illegal payload type %u; "
			"ignoring message\n", pl_type);
		goto error;
	}
	result = 0;
error:
	return result;
}
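
/**
 * i2400m_rx - receive and process a buffer of data from the device
 *
 * @i2400m: device descriptor
 * @skb: skb holding the received message
 *
 * Validates the message header, then walks the payload descriptor table
 * handing each payload to i2400m_rx_payload(); payloads are aligned to
 * I2400M_PL_ALIGN boundaries. RX statistics are updated under rx_lock.
 *
 * Returns 0 on success, in which case the skb is consumed (freed) here;
 * on error a negative errno is returned and the caller still owns the
 * skb.
 */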
int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
{
	int i, result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_msg_hdr *msg_hdr;
	size_t pl_itr, pl_size;
	unsigned long flags;
	unsigned num_pls, single_last, skb_len;

	skb_len = skb->len;
	d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
		  i2400m, skb, skb_len);
	result = -EIO;
	msg_hdr = (void *) skb->data;
	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
	if (result < 0)
		goto error_msg_hdr_check;
	result = -EIO;
	num_pls = le16_to_cpu(msg_hdr->num_pls);
	pl_itr = sizeof(*msg_hdr) +
		num_pls * sizeof(msg_hdr->pld[0]);
	pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
	if (pl_itr > skb_len) {
		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
			"%u payload descriptors (%zu each, total %zu)\n",
			skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
		goto error_pl_descr_short;
	}

	/* Walk each payload: check it, act on it */
	for (i = 0; i < num_pls; i++) {
		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
						  pl_itr, skb_len);
		if (result < 0)
			goto error_pl_descr_check;
		single_last = num_pls == 1 || i == num_pls - 1;
		i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
				  skb->data + pl_itr);
		pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
		cond_resched();		/* Don't monopolize */
	}
	kfree_skb(skb);

	/* Update RX statistics */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400m->rx_pl_num += i;
	if (i > i2400m->rx_pl_max)
		i2400m->rx_pl_max = i;
	if (i < i2400m->rx_pl_min)
		i2400m->rx_pl_min = i;
	i2400m->rx_num++;
	i2400m->rx_size_acc += skb_len;
	if (skb_len < i2400m->rx_size_min)
		i2400m->rx_size_min = skb_len;
	if (skb_len > i2400m->rx_size_max)
		i2400m->rx_size_max = skb_len;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
	d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
		i2400m, skb, skb_len, result);
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_rx);
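
/*
 * Complain about a buffer received with a barker we don't know about
 * and hex-dump up to its first 64 bytes; the caller drops the data.
 */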
void i2400m_unknown_barker(struct i2400m *i2400m,
			   const void *buf, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	char prefix[64];
	const __le32 *barker = buf;

	dev_err(dev, "RX: HW BUG? unknown barker %08x, "
		"dropping %zu bytes\n", le32_to_cpu(*barker), size);
	snprintf(prefix, sizeof(prefix), "%s %s: ",
		 dev_driver_string(dev), dev_name(dev));
	if (size > 64) {
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       8, 4, buf, 64, 0);
		printk(KERN_ERR "%s... (only first 64 bytes "
		       "dumped)\n", prefix);
	} else
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       8, 4, buf, size, 0);
}
EXPORT_SYMBOL(i2400m_unknown_barker);
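
/*
 * Initialize the RX infrastructure
 *
 * When RX reordering is enabled (the default, unless the
 * rx_reorder_disabled module parameter is set), allocate the
 * I2400M_RO_CIN + 1 reorder queues and their operation logs and
 * initialize the shared refcount. Returns 0 on success or -ENOMEM.
 */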
int i2400m_rx_setup(struct i2400m *i2400m)
{
	int result = 0;

	i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
	if (i2400m->rx_reorder) {
		unsigned itr;
		struct i2400m_roq_log *rd;

		result = -ENOMEM;

		i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
					 sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
		if (i2400m->rx_roq == NULL)
			goto error_roq_alloc;

		rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
			     GFP_KERNEL);
		if (rd == NULL) {
			result = -ENOMEM;
			goto error_roq_log_alloc;
		}

		for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
			__i2400m_roq_init(&i2400m->rx_roq[itr]);
			i2400m->rx_roq[itr].log = &rd[itr];
		}
		kref_init(&i2400m->rx_roq_refcount);
	}
	return 0;

error_roq_log_alloc:
	kfree(i2400m->rx_roq);
error_roq_alloc:
	return result;
}
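
/*
 * Tear down the RX infrastructure: drop the reference on the reorder
 * queues (freeing them once nothing else holds them) and flush any
 * reports still queued for deferred processing.
 */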
void i2400m_rx_release(struct i2400m *i2400m)
{
	unsigned long flags;

	if (i2400m->rx_reorder) {
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	}

	i2400m_report_hook_flush(i2400m);
}