/*
 * Tilera TILE on-chip network driver: the "xgbe0"/"xgbe1"/"gbe0"/"gbe1"
 * hypervisor (NetIO) devices.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>

#include <hv/drv_xgbe_intf.h>
#include <hv/drv_xgbe_impl.h>
#include <hv/hypervisor.h>
#include <hv/netio_intf.h>

/* For TSO. */
#include <linux/ip.h>
#include <linux/tcp.h>

/* Supported MTU. */
#define TILE_NET_MTU 1500

/* Transmit queue length. */
#define TILE_NET_TX_QUEUE_LEN 0

/* Transmit watchdog timeout, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* How often to retry bringing up a downed link, in jiffies. */
#define TILE_NET_RETRY_INTERVAL (5 * HZ)

/* Number of hypervisor network devices (xgbe0, xgbe1, gbe0, gbe1). */
#define TILE_NET_DEVS 4

/* Paranoia. */
#if NET_IP_ALIGN != LIPP_PACKET_PADDING
#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
#endif

/* Debug printing. */
#ifdef TILE_NET_DEBUG
#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
#else
#define PDEBUG(fmt, args...)
#endif

MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");

/*
 * Queue of incoming packets for a specific cpu and device.
 *
 * Includes a pointer to the "system" data, and the actual "user" data.
 */
struct tile_netio_queue {
        netio_queue_impl_t *__system_part;
        netio_queue_user_impl_t __user_part;
};

/*
 * Statistics counters for a specific cpu and device.
 */
struct tile_net_stats_t {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 tx_packets;
        u64 rx_bytes;
        u64 tx_bytes;
        u64 rx_errors;          /* bad or truncated packets */
        u64 rx_dropped;         /* filtered packets (not up, or not for us) */
};

/*
 * Info for a specific cpu.
 */
struct tile_net_cpu {
        /* The NAPI struct. */
        struct napi_struct napi;
        /* Packet queue. */
        struct tile_netio_queue queue;
        /* Statistics. */
        struct tile_net_stats_t stats;
        /* True iff NAPI is enabled. */
        bool napi_enabled;
        /* True if this tile has successfully registered with the IPP. */
        bool registered;
        /* True if the link was down last time we tried to register. */
        bool link_down;
        /* True if "egress_timer" is scheduled. */
        bool egress_timer_scheduled;
        /* Number of small sk_buffs which must still be provided. */
        unsigned int num_needed_small_buffers;
        /* Number of large sk_buffs which must still be provided. */
        unsigned int num_needed_large_buffers;
        /* A timer for handling egress completions. */
        struct timer_list egress_timer;
};

/*
 * Info for a specific device.
 */
struct tile_net_priv {
        /* Our network device. */
        struct net_device *dev;
        /* Pages making up the egress queue. */
        struct page *eq_pages;
        /* Address of the actual egress queue. */
        lepp_queue_t *eq;
        /* Protects "eq". */
        spinlock_t eq_lock;
        /* The hypervisor handle for this interface. */
        int hv_devhdl;
        /* The intr bit mask that IDs this device. */
        u32 intr_id;
        /* True iff "tile_net_open_aux()" has succeeded. */
        bool partly_opened;
        /* True iff the device is "active". */
        bool active;
        /* Effective network cpus. */
        struct cpumask network_cpus_map;
        /* Number of network cpus. */
        int network_cpus_count;
        /* Credits per network cpu. */
        int network_cpus_credits;
        /* For NetIO bringup retries. */
        struct delayed_work retry_work;
        /* Quick access to per cpu data. */
        struct tile_net_cpu *cpu[NR_CPUS];
};

/* Log2 of the number of small pages needed for the egress queue. */
#define EQ_ORDER  get_order(sizeof(lepp_queue_t))
/* Size of the egress queue's pages. */
#define EQ_SIZE   (1 << (PAGE_SHIFT + EQ_ORDER))

/* The actual devices (xgbe0, xgbe1, gbe0, gbe1). */
static struct net_device *tile_net_devs[TILE_NET_DEVS];

/* The "tile_net_cpu" structures for each device. */
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);

/* True if "network_cpus" was specified on the boot command line. */
static bool network_cpus_used;

/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;

#ifdef TILE_NET_DEBUG
/*
 * printk() helper that prefixes each message with the current cpu.
 */
static void net_printk(char *fmt, ...)
{
        int i;
        int len;
        va_list args;
        static char buf[256];

        len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
        va_start(args, fmt);
        i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
        va_end(args);
        buf[255] = '\0';
        pr_notice("%s", buf);
}
#endif

#ifdef TILE_NET_DUMP_PACKETS
/*
 * Hex-dump a packet, sixteen bytes per line.
 */
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
        int my_cpu = smp_processor_id();

        unsigned long i;
        char buf[128];

        static unsigned int count;

        pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
                data, length, s, count++);

        pr_info("\n");

        for (i = 0; i < length; i++) {
                if ((i & 0xf) == 0)
                        sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
                sprintf(buf + strlen(buf), " %2.2x", data[i]);
                if ((i & 0xf) == 0xf || i == length - 1) {
                        strcat(buf, "\n");
                        pr_info("%s", buf);
                }
        }
}
#endif

/*
 * Hypervisor "fast I/O" call (one argument), invoked via "swint2".
 * Arguments and the result are passed in registers; the clobber list
 * covers the remaining caller-save registers.
 */
inline int __netio_fastio1(u32 fastio_index, u32 arg0)
{
        long result, clobber_r1, clobber_r10;
        asm volatile("swint2"
                     : "=R00" (result),
                       "=R01" (clobber_r1), "=R10" (clobber_r10)
                     : "R10" (fastio_index), "R01" (arg0)
                     : "memory", "r2", "r3", "r4",
                       "r5", "r6", "r7", "r8", "r9",
                       "r11", "r12", "r13", "r14",
                       "r15", "r16", "r17", "r18", "r19",
                       "r20", "r21", "r22", "r23", "r24",
                       "r25", "r26", "r27", "r28", "r29");
        return result;
}

/*
 * Hand receive credits back to the shim, in batches.
 */
static void tile_net_return_credit(struct tile_net_cpu *info)
{
        struct tile_netio_queue *queue = &info->queue;
        netio_queue_user_impl_t *qup = &queue->__user_part;

        /* Return credits after every "interval" packets. */
        if (--qup->__receive_credit_remaining == 0) {
                u32 interval = qup->__receive_credit_interval;
                qup->__receive_credit_remaining = interval;
                __netio_fastio_return_credits(qup->__fastio_index, interval);
        }
}
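
/*
 * Note on receive credits: each cpu registers with the IPP using
 * "num_receive_packets" credits (see tile_net_register() below), every
 * received packet calls tile_net_return_credit(), and credits are handed
 * back to the shim in batches of "__receive_credit_interval".  The
 * shim-side accounting itself is part of the NetIO interface, not this
 * driver.
 */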

/*
 * Provide a linux buffer to LIPP.
 */
static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
                                          void *va, bool small)
{
        struct tile_netio_queue *queue = &info->queue;

        /* Convert "va" and "small" to a "linux buffer handle". */
        unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;

        __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
}
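
/*
 * For reference, the "linux buffer handle" round trip used throughout
 * this file is:
 *
 *      handle = ((__pa(va) >> 7) << 1) + small;
 *      va     = __va((phys_addr_t)(handle >> 1) << 7);
 *
 * i.e. bit 0 is the "small" flag and the remaining bits are the physical
 * address divided by 128, which is why ingress buffers must be 128-byte
 * aligned.
 */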

/*
 * Provide a new linux buffer to LIPP, of the requested size.
 *
 * The buffer handed to the shim must be 128-byte aligned, must not cross
 * a huge page boundary, and stores a back-pointer to the "skb" immediately
 * before the buffer data.
 */
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
                                           bool small)
{
#if TILE_NET_MTU <= 1536
        /* Without "jumbo", NET_IP_ALIGN + 1536 is sufficient. */
        unsigned int large_size = NET_IP_ALIGN + 1536;
#else
        /* With "jumbo", allow some slack beyond the MTU. */
        unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
#endif

        /* Round the buffer size up to a full L2 cache line. */
        unsigned int len =
                (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
                  CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());

        unsigned int padding = 128 - NET_SKB_PAD;
        unsigned int align;

        struct sk_buff *skb;
        void *va;

        struct sk_buff **skb_ptr;

        /* Request extra bytes so the data can be 128-byte aligned. */
        skb = netdev_alloc_skb(info->napi.dev, len + padding);
        if (skb == NULL)
                return false;

        /* Align the beginning of the skb data to a 128-byte boundary. */
        align = -(long)skb->data & (128 - 1);
        BUG_ON(align > padding);
        skb_reserve(skb, align);

        /* This address is passed to LIPP. */
        va = skb->data;

        /* Buffers must not span a huge page. */
        BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);

#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
        if (hash_default) {
                HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
                if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
                        panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
                              va, hv_pte_get_mode(pte), hv_pte_val(pte));
        }
#endif
#endif

        /* Invalidate the packet buffer. */
        if (!hash_default)
                __inv_buffer(va, len);

        /* Skip NET_IP_ALIGN bytes, so the IP header ends up 16-byte aligned. */
        skb_reserve(skb, NET_IP_ALIGN);

        /* Save a back-pointer to 'skb', just before the buffer data. */
        skb_ptr = va - sizeof(*skb_ptr);
        *skb_ptr = skb;

        /* Make sure "skb_ptr" has been flushed. */
        __insn_mf();

        /* Provide the new buffer. */
        tile_net_provide_linux_buffer(info, va, small);

        return true;
}

/*
 * Provide linux buffers to LIPP.
 */
static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
{
        while (info->num_needed_small_buffers != 0) {
                if (!tile_net_provide_needed_buffer(info, true))
                        goto oops;
                info->num_needed_small_buffers--;
        }

        while (info->num_needed_large_buffers != 0) {
                if (!tile_net_provide_needed_buffer(info, false))
                        goto oops;
                info->num_needed_large_buffers--;
        }

        return;

oops:
        /* Add a description to the page allocation failure dump. */
        pr_notice("Could not provide a linux buffer to LIPP.\n");
}

/*
 * Grab some LEPP completions, and store them in the array "comps", of
 * length "comps_size", but only if at least "min_size" completions are
 * available.  Returns the number grabbed, else zero.
 */
static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
                                             struct sk_buff *comps[],
                                             unsigned int comps_size,
                                             unsigned int min_size)
{
        unsigned int n = 0;

        unsigned int comp_head = eq->comp_head;
        unsigned int comp_busy = eq->comp_busy;

        while (comp_head != comp_busy && n < comps_size) {
                comps[n++] = eq->comps[comp_head];
                LEPP_QINC(comp_head);
        }

        if (n < min_size)
                return 0;

        eq->comp_head = comp_head;

        return n;
}

/*
 * Free some LEPP completions.  Return true iff completions are still
 * pending.
 */
static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
{
        struct tile_net_priv *priv = netdev_priv(dev);

        lepp_queue_t *eq = priv->eq;

        struct sk_buff *olds[64];
        unsigned int wanted = 64;
        unsigned int i, n;
        bool pending;

        spin_lock(&priv->eq_lock);

        if (all)
                eq->comp_busy = eq->comp_tail;

        n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);

        pending = (eq->comp_head != eq->comp_tail);

        spin_unlock(&priv->eq_lock);

        for (i = 0; i < n; i++)
                kfree_skb(olds[i]);

        return pending;
}

/*
 * Make sure the egress timer is scheduled.
 *
 * We use "schedule if not scheduled" rather than "reschedule" because
 * rescheduling an already-pending timer is comparatively expensive.
 */
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
        if (!info->egress_timer_scheduled) {
                mod_timer_pinned(&info->egress_timer, jiffies + 1);
                info->egress_timer_scheduled = true;
        }
}

/*
 * The handler for "info->egress_timer".
 *
 * Frees any available egress completions, and reschedules itself while
 * completions are still pending.
 */
static void tile_net_handle_egress_timer(unsigned long arg)
{
        struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
        struct net_device *dev = info->napi.dev;

        /* The timer is no longer scheduled. */
        info->egress_timer_scheduled = false;

        /* Free comps, and reschedule until none remain. */
        if (tile_net_lepp_free_comps(dev, false))
                tile_net_schedule_egress_timer(info);
}
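
/*
 * Egress completion lifecycle (as used above and by tile_net_tx() and
 * tile_net_tx_tso() below): when a packet is queued, its "skb" is stored
 * in "eq->comps[]" at "comp_tail"; entries between "comp_head" and
 * "comp_busy" are collected and freed via tile_net_lepp_grab_comps().
 * Since there is no egress-completion interrupt, the per-cpu egress timer
 * polls for completions until none remain.  (Advancing "comp_busy" is
 * presumably done by the LEPP shim itself; this driver only forces it
 * forward, in tile_net_lepp_free_comps(), when shutting down.)
 */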

/*
 * Discard one incoming packet, freeing the linux buffer that backs it.
 */
static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
        struct tile_netio_queue *queue = &info->queue;
        netio_queue_impl_t *qsp = queue->__system_part;
        netio_queue_user_impl_t *qup = &queue->__user_part;

        int index2_aux = index + sizeof(netio_pkt_t);
        int index2 =
                ((index2_aux ==
                  qsp->__packet_receive_queue.__last_packet_plus_one) ?
                 0 : index2_aux);

        netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

        /* Extract the "linux buffer handle". */
        unsigned int buffer = pkt->__packet.word;

        /* Convert the handle back into the buffer address. */
        void *va = __va((phys_addr_t)(buffer >> 1) << 7);

        /* Recover the "skb" stored just before the buffer data. */
        struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
        struct sk_buff *skb = *skb_ptr;

        kfree_skb(skb);

        /* Consume this packet. */
        qup->__packet_receive_read = index2;
}

/*
 * Like "tile_net_poll()", but just discard packets.
 */
static void tile_net_discard_packets(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];
        struct tile_netio_queue *queue = &info->queue;
        netio_queue_impl_t *qsp = queue->__system_part;
        netio_queue_user_impl_t *qup = &queue->__user_part;

        while (qup->__packet_receive_read !=
               qsp->__packet_receive_queue.__packet_write) {
                int index = qup->__packet_receive_read;
                tile_net_discard_aux(info, index);
        }
}

/*
 * Handle the next packet.  Return true if "processed", false if "filtered".
 */
static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
{
        struct net_device *dev = info->napi.dev;

        struct tile_netio_queue *queue = &info->queue;
        netio_queue_impl_t *qsp = queue->__system_part;
        netio_queue_user_impl_t *qup = &queue->__user_part;
        struct tile_net_stats_t *stats = &info->stats;

        int filter;

        int index2_aux = index + sizeof(netio_pkt_t);
        int index2 =
                ((index2_aux ==
                  qsp->__packet_receive_queue.__last_packet_plus_one) ?
                 0 : index2_aux);

        netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

        netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
        netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);

        /* Extract the packet size. */
        unsigned long len =
                (NETIO_PKT_CUSTOM_LENGTH(pkt) +
                 NET_IP_ALIGN - NETIO_PACKET_PADDING);

        /* Extract the "linux buffer handle". */
        unsigned int buffer = pkt->__packet.word;

        /* The low bit of the handle encodes "small" vs "large". */
        bool small = ((buffer & 1) != 0);

        /* Convert the handle back into the buffer address. */
        void *va = __va((phys_addr_t)(buffer >> 1) << 7);

        /* The packet data starts after the padding. */
        unsigned char *buf = va + NET_IP_ALIGN;

        /* Invalidate the packet buffer. */
        if (!hash_default)
                __inv_buffer(buf, len);

        dev->last_rx = jiffies;

#ifdef TILE_NET_DUMP_PACKETS
        dump_packet(buf, len, "rx");
#endif

#ifdef TILE_NET_VERIFY_INGRESS
        if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
                dump_packet(buf, len, "rx");
                panic("Unexpected OVERSIZE.");
        }
#endif

        filter = 0;

        if (pkt_status == NETIO_PKT_STATUS_BAD) {
                /* Handle packets the hardware flagged as bad. */
                filter = 2;
        } else if (!(dev->flags & IFF_UP)) {
                /* Filter packets received before we're up. */
                filter = 1;
        } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
                   pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
                /* Filter undersized packets with a recognized ethertype. */
                filter = 2;
        } else if (!(dev->flags & IFF_PROMISC)) {
                if (!is_multicast_ether_addr(buf)) {
                        /* Filter unicast packets not addressed to us. */
                        const u8 *mine = dev->dev_addr;
                        filter = !ether_addr_equal(mine, buf);
                }
        }

        u64_stats_update_begin(&stats->syncp);

        if (filter != 0) {

                if (filter == 1)
                        stats->rx_dropped++;
                else
                        stats->rx_errors++;

                /* Give the buffer straight back to LIPP. */
                tile_net_provide_linux_buffer(info, va, small);

        } else {

                /* Acquire the associated "skb". */
                struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
                struct sk_buff *skb = *skb_ptr;

                /* Paranoia. */
                if (skb->data != buf)
                        panic("Corrupt linux buffer from LIPP! "
                              "VA=%p, skb=%p, skb->data=%p\n",
                              va, skb, skb->data);

                /* Encode the actual packet length. */
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, dev);

                /* Avoid recomputing "good" TCP/UDP checksums. */
                if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                netif_receive_skb(skb);

                stats->rx_packets++;
                stats->rx_bytes += len;
        }

        u64_stats_update_end(&stats->syncp);

        /* Return a credit for this packet. */
        tile_net_return_credit(info);

        /* Consume this packet. */
        qup->__packet_receive_read = index2;

        return !filter;
}

/*
 * Handle some packets for the given device on the current cpu.
 *
 * Processes packets until the budget is exhausted or the queue is empty,
 * at which point the ingress interrupt is re-enabled.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
        struct net_device *dev = napi->dev;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];
        struct tile_netio_queue *queue = &info->queue;
        netio_queue_impl_t *qsp = queue->__system_part;
        netio_queue_user_impl_t *qup = &queue->__user_part;

        unsigned int work = 0;

        if (budget <= 0)
                goto done;

        while (priv->active) {
                int index = qup->__packet_receive_read;
                if (index == qsp->__packet_receive_queue.__packet_write)
                        break;

                if (tile_net_poll_aux(info, index)) {
                        if (++work >= budget)
                                goto done;
                }
        }

        napi_complete(&info->napi);

        if (!priv->active)
                goto done;

        /* Re-enable the ingress interrupt. */
        enable_percpu_irq(priv->intr_id, 0);

        /* Avoid the "rotting packet" problem: if more packets arrived
         * while interrupts were off, reschedule NAPI.
         */
        if (qup->__packet_receive_read !=
            qsp->__packet_receive_queue.__packet_write) {
                (void)napi_reschedule(&info->napi);
        }

done:

        if (priv->active)
                tile_net_provide_needed_buffers(info);

        return work;
}

/*
 * Handle an ingress interrupt for the given device on the current cpu:
 * disable the interrupt and kick NAPI.
 */
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
        struct net_device *dev = (struct net_device *)dev_ptr;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];

        /* Disable the ingress interrupt while polling. */
        disable_percpu_irq(priv->intr_id);

        /* Ignore unwanted interrupts. */
        if (!priv->active)
                return IRQ_HANDLED;

        napi_schedule(&info->napi);

        return IRQ_HANDLED;
}

/*
 * One time initialization per interface.
 */
static int tile_net_open_aux(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);

        int ret;
        int dummy;
        unsigned int epp_lotar;

        /* Find out where EPP memory should be homed. */
        ret = hv_dev_pread(priv->hv_devhdl, 0,
                           (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
                           NETIO_EPP_SHM_OFF);
        if (ret < 0) {
                pr_err("could not read epp_shm_queue lotar.\n");
                return -EIO;
        }

        /* Home the EPP queue pages on the EPP. */
        {
                int epp_home = hv_lotar_to_cpu(epp_lotar);
                homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
        }

        /* Register the EPP shared memory queue. */
        {
                netio_ipp_address_t ea = {
                        .va = 0,
                        .pa = __pa(priv->eq),
                        .pte = hv_pte(0),
                        .size = EQ_SIZE,
                };
                ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
                ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
                ret = hv_dev_pwrite(priv->hv_devhdl, 0,
                                    (HV_VirtAddr)&ea,
                                    sizeof(ea),
                                    NETIO_EPP_SHM_OFF);
                if (ret < 0)
                        return -EIO;
        }

        /* Start the LIPP/LEPP shim. */
        if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
                          sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
                pr_warn("Failed to start LIPP/LEPP\n");
                return -EIO;
        }

        return 0;
}

/*
 * Register with the IPP on this cpu, setting up its per-cpu queue state.
 * Called via on_each_cpu()/smp_call_function() and directly from
 * tile_net_open_inner().
 */
static void tile_net_register(void *dev_ptr)
{
        struct net_device *dev = (struct net_device *)dev_ptr;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info;

        struct tile_netio_queue *queue;

        /* Only network cpus can receive packets. */
        int queue_id =
                cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;

        netio_input_config_t config = {
                .flags = 0,
                .num_receive_packets = priv->network_cpus_credits,
                .queue_id = queue_id
        };

        int ret = 0;
        netio_queue_impl_t *queuep;

        PDEBUG("tile_net_register(queue_id %d)\n", queue_id);

        if (!strcmp(dev->name, "xgbe0"))
                info = this_cpu_ptr(&hv_xgbe0);
        else if (!strcmp(dev->name, "xgbe1"))
                info = this_cpu_ptr(&hv_xgbe1);
        else if (!strcmp(dev->name, "gbe0"))
                info = this_cpu_ptr(&hv_gbe0);
        else if (!strcmp(dev->name, "gbe1"))
                info = this_cpu_ptr(&hv_gbe1);
        else
                BUG();

        /* Initialize the egress timer. */
        init_timer(&info->egress_timer);
        info->egress_timer.data = (long)info;
        info->egress_timer.function = tile_net_handle_egress_timer;

        u64_stats_init(&info->stats.syncp);

        priv->cpu[my_cpu] = info;

        /* Register this cpu/queue with the IPP. */
        ret = hv_dev_pwrite(priv->hv_devhdl, 0,
                            (HV_VirtAddr)&config,
                            sizeof(netio_input_config_t),
                            NETIO_IPP_INPUT_REGISTER_OFF);
        PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
               ret);
        if (ret < 0) {
                if (ret != NETIO_LINK_DOWN) {
                        printk(KERN_DEBUG "hv_dev_pwrite "
                               "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
                               ret);
                }
                info->link_down = (ret == NETIO_LINK_DOWN);
                return;
        }

        /* Read back the pointer to our queue's "system" state. */
        ret = hv_dev_pread(priv->hv_devhdl, 0,
                           (HV_VirtAddr)&queuep,
                           sizeof(netio_queue_impl_t *),
                           NETIO_IPP_INPUT_REGISTER_OFF);
        PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
               ret);
        PDEBUG("queuep %p\n", queuep);
        if (ret <= 0) {
                pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
                return;
        }

        queue = &info->queue;

        queue->__system_part = queuep;

        memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));

        /* Return credits to LIPP in batches of four. */
        queue->__user_part.__receive_credit_interval = 4;
        queue->__user_part.__receive_credit_remaining =
                queue->__user_part.__receive_credit_interval;

        /* Obtain a "fast I/O" index from the hypervisor. */
        ret = hv_dev_pread(priv->hv_devhdl, 0,
                           (HV_VirtAddr)&queue->__user_part.__fastio_index,
                           sizeof(queue->__user_part.__fastio_index),
                           NETIO_IPP_GET_FASTIO_OFF);
        PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);

        /* Now we are registered. */
        info->registered = true;
}

/*
 * Deregister with the IPP on this cpu: disable the ingress interrupt and
 * return our receive credits.
 */
static void tile_net_deregister(void *dev_ptr)
{
        struct net_device *dev = (struct net_device *)dev_ptr;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];

        /* Disable the ingress interrupt. */
        disable_percpu_irq(priv->intr_id);

        /* Do nothing else if not registered. */
        if (info == NULL || !info->registered)
                return;

        {
                struct tile_netio_queue *queue = &info->queue;
                netio_queue_user_impl_t *qup = &queue->__user_part;

                /* Return our credits to LIPP. */
                __netio_fastio_return_credits(qup->__fastio_index, -1);
        }
}

/*
 * Unregister with the IPP on this cpu, discarding any pending packets and
 * cancelling the per-cpu egress timer.
 */
static void tile_net_unregister(void *dev_ptr)
{
        struct net_device *dev = (struct net_device *)dev_ptr;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];

        int ret;
        int dummy = 0;

        /* Disable the ingress interrupt. */
        disable_percpu_irq(priv->intr_id);

        /* Do nothing else if not registered. */
        if (info == NULL || !info->registered)
                return;

        /* Unregister ourselves with LIPP/LEPP. */
        ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
                            sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
        if (ret < 0)
                panic("Failed to unregister with LIPP/LEPP!\n");

        /* Discard all packets still in our NetIO queue. */
        tile_net_discard_packets(dev);

        /* Reset state. */
        info->num_needed_small_buffers = 0;
        info->num_needed_large_buffers = 0;

        /* Cancel the egress timer. */
        del_timer(&info->egress_timer);
        info->egress_timer_scheduled = false;
}

/*
 * Helper function for "tile_net_stop()".
 *
 * Also used to handle registration failure in "tile_net_open_inner()",
 * when the various extra steps in "tile_net_stop()" are not necessary.
 */
static void tile_net_stop_aux(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        int i;

        int dummy = 0;

        /*
         * Unregister all tiles, so LIPP will stop delivering packets.
         * Also, delete all the "napi" objects (sequentially, to protect
         * "dev->napi_list").
         */
        on_each_cpu(tile_net_unregister, (void *)dev, 1);
        for_each_online_cpu(i) {
                struct tile_net_cpu *info = priv->cpu[i];
                if (info != NULL && info->registered) {
                        netif_napi_del(&info->napi);
                        info->registered = false;
                }
        }

        /* Stop LIPP/LEPP. */
        if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
                          sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
                panic("Failed to stop LIPP/LEPP!\n");

        priv->partly_opened = false;
}

/*
 * Disable NAPI for the given device on the current cpu.
 */
static void tile_net_stop_disable(void *dev_ptr)
{
        struct net_device *dev = (struct net_device *)dev_ptr;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];

        /* Disable NAPI if needed. */
        if (info != NULL && info->napi_enabled) {
                napi_disable(&info->napi);
                info->napi_enabled = false;
        }
}

/*
 * Enable NAPI and the ingress interrupt for the given device on the
 * current cpu.
 */
static void tile_net_open_enable(void *dev_ptr)
{
        struct net_device *dev = (struct net_device *)dev_ptr;
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];

        /* Enable NAPI. */
        napi_enable(&info->napi);
        info->napi_enabled = true;

        /* Enable the ingress interrupt. */
        enable_percpu_irq(priv->intr_id, 0);
}

/*
 * Do most of the work of bringing up the interface.  Called from
 * tile_net_open() and tile_net_open_retry().
 *
 * Returns 0 if we registered on every cpu, a positive value if the link
 * is down and we should try again later, or a negative errno.
 */
static int tile_net_open_inner(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info;
        struct tile_netio_queue *queue;
        int result = 0;
        int i;
        int dummy = 0;

        /*
         * First try to register just on the local cpu, to learn whether
         * the link is up.  Note that we must register BEFORE requesting
         * the irq.
         */
        tile_net_register(dev);
        info = priv->cpu[my_cpu];
        if (!info->registered) {
                if (info->link_down)
                        return 1;
                return -EAGAIN;
        }

        /*
         * Now register on all the other cpus, and add all the "napi"
         * objects (sequentially, to protect "dev->napi_list").
         */
        smp_call_function(tile_net_register, (void *)dev, 1);
        for_each_online_cpu(i) {
                struct tile_net_cpu *info = priv->cpu[i];
                if (info->registered)
                        netif_napi_add(dev, &info->napi, tile_net_poll, 64);
                else
                        result = -EAGAIN;
        }
        if (result != 0) {
                tile_net_stop_aux(dev);
                return result;
        }

        queue = &info->queue;

        if (priv->intr_id == 0) {
                unsigned int irq;

                /* Acquire the irq allocated for this device by the hypervisor. */
                priv->intr_id = queue->__system_part->__intr_id;
                BUG_ON(priv->intr_id == 0);
                irq = __ffs(priv->intr_id);

                /*
                 * Register the ingress interrupt handler for this device
                 * (done only once, and never freed).
                 */
                tile_irq_activate(irq, TILE_IRQ_PERCPU);
                BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
                                   0, dev->name, (void *)dev) != 0);
        }

        {
                /* Allocate initial buffers. */
                int max_buffers =
                        priv->network_cpus_count * priv->network_cpus_credits;

                info->num_needed_small_buffers =
                        min(LIPP_SMALL_BUFFERS, max_buffers);

                info->num_needed_large_buffers =
                        min(LIPP_LARGE_BUFFERS, max_buffers);

                tile_net_provide_needed_buffers(info);

                if (info->num_needed_small_buffers != 0 ||
                    info->num_needed_large_buffers != 0)
                        panic("Insufficient memory for buffer stack!");
        }

        /* We are about to be active. */
        priv->active = true;

        /* Make sure "active" is visible to all tiles. */
        mb();

        /* On each tile, enable NAPI and the ingress interrupt. */
        on_each_cpu(tile_net_open_enable, (void *)dev, 1);

        /* Activate the LIPP shim. */
        if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
                          sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
                panic("Failed to activate the LIPP Shim!\n");

        /* Start our transmit queue. */
        netif_start_queue(dev);

        return 0;
}

/*
 * Called periodically to retry bringing up the NetIO interface,
 * if it doesn't come up cleanly during tile_net_open().
 */
static void tile_net_open_retry(struct work_struct *w)
{
        struct delayed_work *dw =
                container_of(w, struct delayed_work, work);

        struct tile_net_priv *priv =
                container_of(dw, struct tile_net_priv, retry_work);

        /*
         * Try to bring the NetIO interface up.  If it fails, reschedule
         * ourselves to try again later; otherwise, tell Linux we now have
         * a working link.
         */
        if (tile_net_open_inner(priv->dev) != 0)
                schedule_delayed_work(&priv->retry_work,
                                      TILE_NET_RETRY_INTERVAL);
        else
                netif_carrier_on(priv->dev);
}

/*
 * Called when a network interface is made active (IFF_UP).
 *
 * Returns 0 on success.  If the link is not yet available, we report no
 * carrier and keep retrying from a delayed work item until it comes up.
 */
static int tile_net_open(struct net_device *dev)
{
        int ret = 0;
        struct tile_net_priv *priv = netdev_priv(dev);

        /*
         * "priv->partly_opened" tracks whether the one-time setup (EPP
         * registration, network-cpu analysis) has already happened.
         */
        if (!priv->partly_opened) {

                int count;
                int credits;

                /* Initialize LIPP/LEPP, and start the Shim. */
                ret = tile_net_open_aux(dev);
                if (ret < 0) {
                        pr_err("tile_net_open_aux failed: %d\n", ret);
                        return ret;
                }

                /* Analyze the network cpus. */
                if (network_cpus_used)
                        cpumask_copy(&priv->network_cpus_map,
                                     &network_cpus_map);
                else
                        cpumask_copy(&priv->network_cpus_map, cpu_online_mask);

                count = cpumask_weight(&priv->network_cpus_map);

                /* Give each cpu an even share of the large buffers, at least 16. */
                credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);

                /* But never more than the NetIO maximum. */
                credits = min(NETIO_MAX_RECEIVE_PKTS, credits);

                priv->network_cpus_count = count;
                priv->network_cpus_credits = credits;

#ifdef TILE_NET_DEBUG
                pr_info("Using %d network cpus, with %d credits each\n",
                        priv->network_cpus_count, priv->network_cpus_credits);
#endif

                priv->partly_opened = true;

        } else {
                /* Already partly opened; nothing more to redo here. */
        }

        /* Attempt to bring up the link. */
        ret = tile_net_open_inner(dev);
        if (ret <= 0) {
                if (ret == 0)
                        netif_carrier_on(dev);
                return ret;
        }

        /*
         * We were unable to bring up the NetIO interface.  Report no
         * carrier so Linux won't try to use the interface, and arrange
         * to try again later.
         */
        netif_carrier_off(dev);
        schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);

        return 0;
}

/*
 * Take back any buffers that LIPP is still holding, freeing the
 * corresponding skbs.  Returns the number of buffers drained.
 */
static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
{
        int n = 0;

        /* Drain all the LIPP buffers. */
        while (true) {
                unsigned int buffer;

                if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
                                 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
                        break;

                /* Stop when done. */
                if (buffer == 0)
                        break;

                {
                        /* Convert the "linux buffer handle" to "va". */
                        void *va = __va((phys_addr_t)(buffer >> 1) << 7);

                        /* Acquire the associated "skb". */
                        struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
                        struct sk_buff *skb = *skb_ptr;

                        kfree_skb(skb);
                }

                n++;
        }

        return n;
}

/*
 * Disables a network interface.
 *
 * Returns 0; this is not allowed to fail.
 *
 * We first mark the device inactive and stop ingress delivery and NAPI
 * on every cpu, then drain any buffers still held by LIPP, stop the
 * shim, and finally clean up pending egress completions and the egress
 * queue itself.
 */
static int tile_net_stop(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);

        PDEBUG("tile_net_stop()\n");

        /* Start discarding packets. */
        priv->active = false;

        /* Make sure "active" is visible to all tiles. */
        mb();

        /* Stop receiving packets on each tile. */
        on_each_cpu(tile_net_deregister, (void *)dev, 1);

        /* Optimistically drain LIPP buffers. */
        (void)tile_net_drain_lipp_buffers(priv);

        /* Stop retrying to bring up the NetIO interface. */
        cancel_delayed_work_sync(&priv->retry_work);

        /* Stop transmitting. */
        netif_stop_queue(dev);

        /* Disable NAPI on each tile. */
        on_each_cpu(tile_net_stop_disable, (void *)dev, 1);

        /*
         * Drain anything the NAPI code may have re-provided to LIPP
         * before it was disabled.
         */
        if (tile_net_drain_lipp_buffers(priv) != 0)
                pr_warn("Had to drain some extra LIPP buffers!\n");

        /* Stop LIPP/LEPP. */
        tile_net_stop_aux(dev);

        /*
         * Only after LIPP/LEPP have been stopped is it safe to free any
         * pending egress completions.
         */
        while (tile_net_lepp_free_comps(dev, true))
                /* loop */;

        /* Wipe the EPP queue, and wait till the wipe is complete. */
        memset(priv->eq, 0, sizeof(lepp_queue_t));
        mb();

        return 0;
}

/*
 * Prepare the "frags" info for the resulting LEPP command.
 *
 * If needed, flush the memory used by the frags.
 */
static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
                                      struct sk_buff *skb,
                                      void *b_data, unsigned int b_len)
{
        unsigned int i, n = 0;

        struct skb_shared_info *sh = skb_shinfo(skb);

        phys_addr_t cpa;

        if (b_len != 0) {

                if (!hash_default)
                        finv_buffer_remote(b_data, b_len, 0);

                cpa = __pa(b_data);
                frags[n].cpa_lo = cpa;
                frags[n].cpa_hi = cpa >> 32;
                frags[n].length = b_len;
                frags[n].hash_for_home = hash_default;
                n++;
        }

        for (i = 0; i < sh->nr_frags; i++) {

                skb_frag_t *f = &sh->frags[i];
                unsigned long pfn = page_to_pfn(skb_frag_page(f));

                int hash_for_home = hash_default;

                if (!hash_default) {
                        void *va = pfn_to_kaddr(pfn) + f->page_offset;
                        BUG_ON(PageHighMem(skb_frag_page(f)));
                        finv_buffer_remote(va, skb_frag_size(f), 0);
                }

                cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
                frags[n].cpa_lo = cpa;
                frags[n].cpa_hi = cpa >> 32;
                frags[n].length = skb_frag_size(f);
                frags[n].hash_for_home = hash_for_home;
                n++;
        }

        return n;
}

/*
 * This function takes "skb", consisting of a header template and a
 * payload, and hands it to LEPP, to emit as one or more segments, each
 * consisting of a possibly modified copy of the header plus a piece of
 * the payload, via a process known as "tso".
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];
        struct tile_net_stats_t *stats = &info->stats;

        struct skb_shared_info *sh = skb_shinfo(skb);

        unsigned char *data = skb->data;

        /* The ip header follows the ethernet header. */
        struct iphdr *ih = ip_hdr(skb);
        unsigned int ih_len = ih->ihl * 4;

        /* Note that "nh == ih", by definition. */
        unsigned char *nh = skb_network_header(skb);
        unsigned int eh_len = nh - data;

        /* The tcp header follows the ip header. */
        struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
        unsigned int th_len = th->doff * 4;

        /* The total number of header bytes. */
        unsigned int sh_len = eh_len + ih_len + th_len;

        /* The "payload" at the end of the "data" (not in frags). */
        unsigned int b_len = skb_headlen(skb) - sh_len;

        /* The total number of payload bytes. */
        unsigned int d_len = b_len + skb->data_len;

        /* The maximum payload size per segment. */
        unsigned int p_len = sh->gso_size;

        /* The total number of segments. */
        unsigned int num_segs = sh->gso_segs;

        /* The temporary copy of the command. */
        u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
        lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;

        /* Analyze the "frags". */
        unsigned int num_frags =
                tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);

        /* The size of the command, including frags and header. */
        size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);

        /* The command header. */
        lepp_tso_cmd_t cmd_init = {
                .tso = true,
                .header_size = sh_len,
                .ip_offset = eh_len,
                .tcp_offset = eh_len + ih_len,
                .payload_size = p_len,
                .num_frags = num_frags,
        };

        unsigned long irqflags;

        lepp_queue_t *eq = priv->eq;

        struct sk_buff *olds[8];
        unsigned int wanted = 8;
        unsigned int i, nolds = 0;

        unsigned int cmd_head, cmd_tail, cmd_next;
        unsigned int comp_tail;

        /* Paranoia. */
        BUG_ON(skb->protocol != htons(ETH_P_IP));
        BUG_ON(ih->protocol != IPPROTO_TCP);
        BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
        BUG_ON(num_frags > LEPP_MAX_FRAGS);
        BUG_ON(num_segs <= 1);

        /* Finish preparing the command. */

        /* Copy the command header. */
        *cmd = cmd_init;

        /* Copy the "header". */
        memcpy(&cmd->frags[num_frags], data, sh_len);

        /* Prefetch and wait, to minimize time spent holding the spinlock. */
        prefetch_L1(&eq->comp_tail);
        prefetch_L1(&eq->cmd_tail);
        mb();

        /* Enqueue the command. */

        spin_lock_irqsave(&priv->eq_lock, irqflags);

        /* Handle completions if needed, to make room. */
        if (lepp_num_free_comp_slots(eq) == 0) {
                nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
                if (nolds == 0) {
busy:
                        spin_unlock_irqrestore(&priv->eq_lock, irqflags);
                        return NETDEV_TX_BUSY;
                }
        }

        cmd_head = eq->cmd_head;
        cmd_tail = eq->cmd_tail;

        /* Prepare to advance, detecting full queue. */
        cmd_next = cmd_tail + cmd_size;
        if (cmd_tail < cmd_head && cmd_next >= cmd_head)
                goto busy;
        if (cmd_next > LEPP_CMD_LIMIT) {
                cmd_next = 0;
                if (cmd_next == cmd_head)
                        goto busy;
        }

        /* Copy the command. */
        memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);

        /* Advance. */
        cmd_tail = cmd_next;

        /* Record "skb" for eventual freeing. */
        comp_tail = eq->comp_tail;
        eq->comps[comp_tail] = skb;
        LEPP_QINC(comp_tail);
        eq->comp_tail = comp_tail;

        /* Flush before allowing LEPP to handle the command. */
        __insn_mf();

        eq->cmd_tail = cmd_tail;

        /* Grab a few completions while we still hold the lock. */
        if (nolds == 0)
                nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);

        spin_unlock_irqrestore(&priv->eq_lock, irqflags);

        /* Handle completions. */
        for (i = 0; i < nolds; i++)
                dev_consume_skb_any(olds[i]);

        /* Update stats. */
        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets += num_segs;
        stats->tx_bytes += (num_segs * sh_len) + d_len;
        u64_stats_update_end(&stats->syncp);

        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer(info);

        return NETDEV_TX_OK;
}

/*
 * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
 */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        int my_cpu = smp_processor_id();
        struct tile_net_cpu *info = priv->cpu[my_cpu];
        struct tile_net_stats_t *stats = &info->stats;

        unsigned long irqflags;

        struct skb_shared_info *sh = skb_shinfo(skb);

        unsigned int len = skb->len;
        unsigned char *data = skb->data;

        unsigned int csum_start = skb_checksum_start_offset(skb);

        lepp_frag_t frags[1 + MAX_SKB_FRAGS];

        unsigned int num_frags;

        lepp_queue_t *eq = priv->eq;

        struct sk_buff *olds[8];
        unsigned int wanted = 8;
        unsigned int i, nolds = 0;

        unsigned int cmd_size = sizeof(lepp_cmd_t);

        unsigned int cmd_head, cmd_tail, cmd_next;
        unsigned int comp_tail;

        lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];

        /* Bail if this cpu never registered (e.g. the link never came up). */
        if (!info->registered)
                return NETDEV_TX_BUSY;

        /* Save the timestamp. */
        dev->trans_start = jiffies;

#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
        if (hash_default) {
                HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
                if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
                        panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
                              data, hv_pte_get_mode(pte), hv_pte_val(pte));
        }
#endif
#endif

#ifdef TILE_NET_DUMP_PACKETS
        dump_packet(data, skb_headlen(skb), "tx");
#endif

        if (sh->gso_size != 0)
                return tile_net_tx_tso(skb, dev);

        /* Prepare the commands. */

        num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));

        for (i = 0; i < num_frags; i++) {

                bool final = (i == num_frags - 1);

                lepp_cmd_t cmd = {
                        .cpa_lo = frags[i].cpa_lo,
                        .cpa_hi = frags[i].cpa_hi,
                        .length = frags[i].length,
                        .hash_for_home = frags[i].hash_for_home,
                        .send_completion = final,
                        .end_of_packet = final
                };

                if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
                        cmd.compute_checksum = 1;
                        cmd.checksum_data.bits.start_byte = csum_start;
                        cmd.checksum_data.bits.count = len - csum_start;
                        cmd.checksum_data.bits.destination_byte =
                                csum_start + skb->csum_offset;
                }

                cmds[i] = cmd;
        }

        /* Prefetch and wait, to minimize time spent holding the spinlock. */
        prefetch_L1(&eq->comp_tail);
        prefetch_L1(&eq->cmd_tail);
        mb();

        /* Enqueue the commands. */

        spin_lock_irqsave(&priv->eq_lock, irqflags);

        /* Handle completions if needed, to make room. */
        if (lepp_num_free_comp_slots(eq) == 0) {
                nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
                if (nolds == 0) {
busy:
                        spin_unlock_irqrestore(&priv->eq_lock, irqflags);
                        return NETDEV_TX_BUSY;
                }
        }

        cmd_head = eq->cmd_head;
        cmd_tail = eq->cmd_tail;

        /* Copy the commands, or fail. */
        for (i = 0; i < num_frags; i++) {

                /* Prepare to advance, detecting full queue. */
                cmd_next = cmd_tail + cmd_size;
                if (cmd_tail < cmd_head && cmd_next >= cmd_head)
                        goto busy;
                if (cmd_next > LEPP_CMD_LIMIT) {
                        cmd_next = 0;
                        if (cmd_next == cmd_head)
                                goto busy;
                }

                /* Copy the command. */
                *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];

                /* Advance. */
                cmd_tail = cmd_next;
        }

        /* Record "skb" for eventual freeing. */
        comp_tail = eq->comp_tail;
        eq->comps[comp_tail] = skb;
        LEPP_QINC(comp_tail);
        eq->comp_tail = comp_tail;

        /* Flush before allowing LEPP to handle the command. */
        __insn_mf();

        eq->cmd_tail = cmd_tail;

        /* Grab a few completions while we still hold the lock. */
        if (nolds == 0)
                nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);

        spin_unlock_irqrestore(&priv->eq_lock, irqflags);

        /* Handle completions. */
        for (i = 0; i < nolds; i++)
                dev_consume_skb_any(olds[i]);

        /* Update stats. */
        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
        u64_stats_update_end(&stats->syncp);

        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer(info);

        return NETDEV_TX_OK;
}

/*
 * Deal with a transmit timeout.
 */
static void tile_net_tx_timeout(struct net_device *dev)
{
        PDEBUG("tile_net_tx_timeout()\n");
        PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
               jiffies - dev->trans_start);

        /* Wake up the transmit queue and try again. */
        netif_wake_queue(dev);
}

/*
 * Ioctl commands are not supported.
 */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        return -EOPNOTSUPP;
}

static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
                struct rtnl_link_stats64 *stats)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0;
        u64 rx_bytes = 0, tx_bytes = 0;
        u64 rx_errors = 0, rx_dropped = 0;
        int i;

        for_each_online_cpu(i) {
                struct tile_net_stats_t *cpu_stats;
                u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
                u64 trx_errors, trx_dropped;
                unsigned int start;

                if (priv->cpu[i] == NULL)
                        continue;
                cpu_stats = &priv->cpu[i]->stats;

                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        trx_packets = cpu_stats->rx_packets;
                        ttx_packets = cpu_stats->tx_packets;
                        trx_bytes   = cpu_stats->rx_bytes;
                        ttx_bytes   = cpu_stats->tx_bytes;
                        trx_errors  = cpu_stats->rx_errors;
                        trx_dropped = cpu_stats->rx_dropped;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                rx_packets += trx_packets;
                tx_packets += ttx_packets;
                rx_bytes   += trx_bytes;
                tx_bytes   += ttx_bytes;
                rx_errors  += trx_errors;
                rx_dropped += trx_dropped;
        }

        stats->rx_packets = rx_packets;
        stats->tx_packets = tx_packets;
        stats->rx_bytes   = rx_bytes;
        stats->tx_bytes   = tx_bytes;
        stats->rx_errors  = rx_errors;
        stats->rx_dropped = rx_dropped;

        return stats;
}

/*
 * Change the "mtu".
 */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
        PDEBUG("tile_net_change_mtu()\n");

        /* Check ranges. */
        if ((new_mtu < 68) || (new_mtu > 1500))
                return -EINVAL;

        /* Accept the value. */
        dev->mtu = new_mtu;

        return 0;
}

/*
 * Change the Ethernet Address of the NIC.
 *
 * The new address is only used for incoming packet filtering
 * (see tile_net_poll_aux()).
 *
 * Returns 0 on success, negative on failure.
 */
static int tile_net_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        return 0;
}

/*
 * Obtain the MAC address from the hypervisor.
 * This must be done before opening the device.
 */
static int tile_net_get_mac(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);

        char hv_dev_name[32];
        int len;

        __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };

        int ret;

        /* For example, "xgbe0". */
        strcpy(hv_dev_name, dev->name);
        len = strlen(hv_dev_name);

        /* For example, "xgbe/0". */
        hv_dev_name[len] = hv_dev_name[len - 1];
        hv_dev_name[len - 1] = '/';
        len++;

        /* For example, "xgbe/0/native_hash". */
        strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");

        /* Get the hypervisor handle for this device. */
        priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
        PDEBUG("hv_dev_open(%s) returned %d %p\n",
               hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
        if (priv->hv_devhdl < 0) {
                if (priv->hv_devhdl == HV_ENODEV)
                        printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
                               hv_dev_name);
                else
                        printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
                               hv_dev_name, priv->hv_devhdl);
                return -1;
        }

        /* Read the hardware address from the hypervisor. */
        offset.bits.class = NETIO_PARAM;
        offset.bits.addr = NETIO_PARAM_MAC;
        ret = hv_dev_pread(priv->hv_devhdl, 0,
                           (HV_VirtAddr)dev->dev_addr, dev->addr_len,
                           offset.word);
        PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
        if (ret <= 0) {
                printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
                       dev->name);

                /*
                 * The device exists but we could not read its MAC address,
                 * so fall back to a random one.
                 */
                eth_hw_addr_random(dev);
        }

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        disable_percpu_irq(priv->intr_id);
        tile_net_handle_ingress_interrupt(priv->intr_id, dev);
        enable_percpu_irq(priv->intr_id, 0);
}
#endif

static const struct net_device_ops tile_net_ops = {
        .ndo_open = tile_net_open,
        .ndo_stop = tile_net_stop,
        .ndo_start_xmit = tile_net_tx,
        .ndo_do_ioctl = tile_net_ioctl,
        .ndo_get_stats64 = tile_net_get_stats64,
        .ndo_change_mtu = tile_net_change_mtu,
        .ndo_tx_timeout = tile_net_tx_timeout,
        .ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = tile_net_netpoll,
#endif
};

/*
 * The setup function passed to alloc_netdev(): set the device ops,
 * defaults, and offload features.
 */
static void tile_net_setup(struct net_device *dev)
{
        netdev_features_t features = 0;

        ether_setup(dev);
        dev->netdev_ops = &tile_net_ops;
        dev->watchdog_timeo = TILE_NET_TIMEOUT;
        dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
        dev->mtu = TILE_NET_MTU;

        features |= NETIF_F_HW_CSUM;
        features |= NETIF_F_SG;

        /* We support TSO iff the HV supports sufficient frags. */
        if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
                features |= NETIF_F_TSO;

        /*
         * We can't support HIGHDMA without hash_default, since we need
         * to be able to finv() with a VA if we don't have hash_default.
         */
        if (hash_default)
                features |= NETIF_F_HIGHDMA;

        dev->hw_features   |= features;
        dev->vlan_features |= features;
        dev->features      |= features;
}

/*
 * Allocate and register a network device with the given name.
 */
static struct net_device *tile_net_dev_init(const char *name)
{
        int ret;
        struct net_device *dev;
        struct tile_net_priv *priv;

        /*
         * Allocate the device structure.  This allocates "priv", calls
         * tile_net_setup(), and saves "name".
         */
        dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
                           tile_net_setup);
        if (!dev) {
                pr_err("alloc_netdev(%s) failed\n", name);
                return NULL;
        }

        priv = netdev_priv(dev);

        /* Initialize "priv". */
        memset(priv, 0, sizeof(*priv));

        /* Save "dev" for "tile_net_open_retry()". */
        priv->dev = dev;

        INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);

        spin_lock_init(&priv->eq_lock);

        /* Allocate "eq". */
        priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
        if (!priv->eq_pages) {
                free_netdev(dev);
                return NULL;
        }
        priv->eq = page_address(priv->eq_pages);

        /* Register the network device. */
        ret = register_netdev(dev);
        if (ret) {
                pr_err("register_netdev %s failed %d\n", dev->name, ret);
                __free_pages(priv->eq_pages, EQ_ORDER);
                free_netdev(dev);
                return NULL;
        }

        /* Get the MAC address. */
        ret = tile_net_get_mac(dev);
        if (ret < 0) {
                unregister_netdev(dev);
                __free_pages(priv->eq_pages, EQ_ORDER);
                free_netdev(dev);
                return NULL;
        }

        return dev;
}

/*
 * Module cleanup.
 */
static void tile_net_cleanup(void)
{
        int i;

        for (i = 0; i < TILE_NET_DEVS; i++) {
                if (tile_net_devs[i]) {
                        struct net_device *dev = tile_net_devs[i];
                        struct tile_net_priv *priv = netdev_priv(dev);
                        unregister_netdev(dev);
                        finv_buffer_remote(priv->eq, EQ_SIZE, 0);
                        __free_pages(priv->eq_pages, EQ_ORDER);
                        free_netdev(dev);
                }
        }
}

/*
 * Module initialization.
 */
static int tile_net_init_module(void)
{
        pr_info("Tilera Network Driver\n");

        tile_net_devs[0] = tile_net_dev_init("xgbe0");
        tile_net_devs[1] = tile_net_dev_init("xgbe1");
        tile_net_devs[2] = tile_net_dev_init("gbe0");
        tile_net_devs[3] = tile_net_dev_init("gbe1");

        return 0;
}

module_init(tile_net_init_module);
module_exit(tile_net_cleanup);

#ifndef MODULE

/*
 * The "network_cpus" boot argument specifies the cpus that are dedicated
 * to handling ingress packets, as a cpu list (e.g. "network_cpus=1-3,5").
 */
static int __init network_cpus_setup(char *str)
{
        int rc = cpulist_parse_crop(str, &network_cpus_map);
        if (rc != 0) {
                pr_warn("network_cpus=%s: malformed cpu list\n", str);
        } else {

                /* Limit the mask to cpus that actually exist. */
                cpumask_and(&network_cpus_map, &network_cpus_map,
                            cpu_possible_mask);

                if (cpumask_empty(&network_cpus_map)) {
                        pr_warn("Ignoring network_cpus='%s'\n", str);
                } else {
                        pr_info("Linux network CPUs: %*pbl\n",
                                cpumask_pr_args(&network_cpus_map));
                        network_cpus_used = true;
                }
        }

        return 0;
}
__setup("network_cpus=", network_cpus_setup);

#endif