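/*
 * Network driver for the Tilera TILEPro on-chip network interfaces
 * ("xgbe0"/"xgbe1"/"gbe0"/"gbe1"), which are reached through the
 * hypervisor's LIPP (ingress) and LEPP (egress) shims.
 */
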
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>

#include <hv/drv_xgbe_intf.h>
#include <hv/drv_xgbe_impl.h>
#include <hv/hypervisor.h>
#include <hv/netio_intf.h>

#include <linux/ip.h>
#include <linux/tcp.h>

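/*
 * Driver overview:
 *
 * tile_net_init_module() creates the four possible devices ("xgbe0",
 * "xgbe1", "gbe0", "gbe1"); a device whose hypervisor backend is not
 * configured is simply skipped.
 *
 * "ifconfig DEV up" invokes tile_net_open(), which picks the set of
 * "network cpus", starts the LIPP/LEPP shims via tile_net_open_aux(),
 * and then uses tile_net_open_inner() to register every cpu with the
 * IPP, hand it ingress buffers, and enable per-cpu interrupts and NAPI.
 * If the link is down, retry_work keeps calling tile_net_open_inner()
 * until registration succeeds.
 *
 * "ifconfig DEV down" invokes tile_net_stop(), which deregisters the
 * cpus, drains the LIPP buffers, stops the shims, and wipes the LEPP
 * egress queue.
 */
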
/* The transmit MTU (tile_net_change_mtu() also enforces this limit). */
#define TILE_NET_MTU 1500

/* Egress goes straight to the LEPP queue, so no qdisc queue is used. */
#define TILE_NET_TX_QUEUE_LEN 0

/* The transmit watchdog timeout. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* How often to retry bringing up an interface whose link is down. */
#define TILE_NET_RETRY_INTERVAL (5 * HZ)

/* The number of hypervisor network devices we can drive. */
#define TILE_NET_DEVS 4

/* Paranoia: the padding LIPP applies must match what the stack expects. */
#if NET_IP_ALIGN != LIPP_PACKET_PADDING
#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
#endif

/* Debug printk(), compiled away unless TILE_NET_DEBUG is defined. */
#ifdef TILE_NET_DEBUG
#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
#else
#define PDEBUG(fmt, args...)
#endif

MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");

/*
 * Per-cpu, per-device ingress queue: a pointer to the "system" part
 * shared with the hypervisor shim, plus this tile's private "user"
 * bookkeeping.
 */
struct tile_netio_queue {
	netio_queue_impl_t *__system_part;
	netio_queue_user_impl_t __user_part;
};

/* Per-cpu, per-device statistics. */
struct tile_net_stats_t {
	struct u64_stats_sync syncp;
	u64 rx_packets;		/* total packets received */
	u64 tx_packets;		/* total packets transmitted */
	u64 rx_bytes;		/* total bytes received */
	u64 tx_bytes;		/* total bytes transmitted */
	u64 rx_errors;		/* packets LIPP marked as bad */
	u64 rx_dropped;		/* packets dropped (e.g. device not up) */
};

/* Per-cpu, per-device driver state. */
struct tile_net_cpu {
	/* The NAPI context (its "dev" pointer also names our device). */
	struct napi_struct napi;
	/* The ingress queue shared with the IPP. */
	struct tile_netio_queue queue;
	/* Statistics for this cpu. */
	struct tile_net_stats_t stats;
	/* True iff NAPI is enabled on this cpu. */
	bool napi_enabled;
	/* True iff this cpu has registered with the IPP. */
	bool registered;
	/* True iff registration failed because the link was down. */
	bool link_down;
	/* True iff "egress_timer" is currently scheduled. */
	bool egress_timer_scheduled;
	/* Number of small buffers which must still be provided to LIPP. */
	unsigned int num_needed_small_buffers;
	/* Number of large buffers which must still be provided to LIPP. */
	unsigned int num_needed_large_buffers;
	/* Timer used to reap egress completions. */
	struct timer_list egress_timer;
};

/* Per-device driver state. */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* Pages making up the LEPP egress queue. */
	struct page *eq_pages;
	/* The LEPP egress queue itself. */
	lepp_queue_t *eq;
	/* Protects "eq". */
	spinlock_t eq_lock;
	/* The hypervisor handle for this device. */
	int hv_devhdl;
	/* The per-cpu interrupt bit that identifies this device. */
	u32 intr_id;
	/* True iff tile_net_open_aux() has been run. */
	bool partly_opened;
	/* True iff the device is up and not being stopped. */
	bool active;
	/* The cpus used for ingress processing. */
	struct cpumask network_cpus_map;
	/* The number of cpus in "network_cpus_map". */
	int network_cpus_count;
	/* The ingress credits granted to each network cpu. */
	int network_cpus_credits;
	/* Work used to retry tile_net_open_inner() when the link is down. */
	struct delayed_work retry_work;
	/* Quick access to the per-cpu state. */
	struct tile_net_cpu *cpu[NR_CPUS];
};

/* The LEPP egress queue occupies a whole number of pages. */
#define EQ_ORDER get_order(sizeof(lepp_queue_t))
#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))

/* The devices we may drive; unconfigured slots stay NULL. */
static struct net_device *tile_net_devs[TILE_NET_DEVS];

/* Per-cpu state, one instance per possible device. */
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);

/* True if "network_cpus=" was given on the boot command line. */
static bool network_cpus_used;

/* The cpus named by "network_cpus=". */
static struct cpumask network_cpus_map;

#ifdef TILE_NET_DEBUG
/*
 * printk() that prefixes each message with the current cpu, for use
 * by the PDEBUG() macro above.
 */
static void net_printk(char *fmt, ...)
{
	int i;
	int len;
	va_list args;
	static char buf[256];

	len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
	va_start(args, fmt);
	i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
	va_end(args);
	buf[255] = '\0';
	pr_notice("%s", buf);
}
#endif

#ifdef TILE_NET_DUMP_PACKETS
/* Dump a packet in hex, sixteen bytes per line. */
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
	int my_cpu = smp_processor_id();

	unsigned long i;
	char buf[128];

	static unsigned int count;

	pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
		data, length, s, count++);

	pr_info("\n");

	for (i = 0; i < length; i++) {
		if ((i & 0xf) == 0)
			sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
		sprintf(buf + strlen(buf), " %2.2x", data[i]);
		if ((i & 0xf) == 0xf || i == length - 1) {
			strcat(buf, "\n");
			pr_info("%s", buf);
		}
	}
}
#endif
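
/*
 * Issue the "swint2" fast syscall used by the NetIO fast-I/O calls
 * (see <hv/drv_xgbe_intf.h>).  The call may clobber all caller-saved
 * registers, so they are all listed; gcc will not let an input operand
 * be clobbered, so r1 and r10 are faked as dummy outputs.
 */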
inline int __netio_fastio1(u32 fastio_index, u32 arg0)
{
	long result, clobber_r1, clobber_r10;
	asm volatile("swint2"
		     : "=R00" (result),
		       "=R01" (clobber_r1), "=R10" (clobber_r10)
		     : "R10" (fastio_index), "R01" (arg0)
		     : "memory", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
	return result;
}

/* Return receive credits to the IPP, in batches, as packets are consumed. */
static void tile_net_return_credit(struct tile_net_cpu *info)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	/* Return credits once a whole interval's worth has accumulated. */
	if (--qup->__receive_credit_remaining == 0) {
		u32 interval = qup->__receive_credit_interval;
		qup->__receive_credit_remaining = interval;
		__netio_fastio_return_credits(qup->__fastio_index, interval);
	}
}

/*
 * Provide a linux buffer to LIPP.  The handle passed to the shim packs
 * the buffer's physical address (in 128-byte units) with a low bit
 * flagging whether this is a "small" buffer.
 */
static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
					  void *va, bool small)
{
	struct tile_netio_queue *queue = &info->queue;

	/* Convert "va" and "small" into the shim's buffer handle. */
	unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;

	__netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
}
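
/*
 * Allocate and provide one ingress buffer to LIPP.
 *
 * The buffer must start on a 128-byte boundary, must not cross a huge
 * page, and a pointer to the skb itself is stashed in the word just
 * below the buffer so the ingress path can recover it.  Returns false
 * if the skb could not be allocated.
 */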
420static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
421 bool small)
422{
423#if TILE_NET_MTU <= 1536
424
425 unsigned int large_size = NET_IP_ALIGN + 1536;
426#else
427
428 unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
429#endif
430
431
432
433 unsigned int len =
434 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
435 CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
436
437 unsigned int padding = 128 - NET_SKB_PAD;
438 unsigned int align;
439
440 struct sk_buff *skb;
441 void *va;
442
443 struct sk_buff **skb_ptr;
444
445
446 skb = netdev_alloc_skb(info->napi.dev, len + padding);
447 if (skb == NULL)
448 return false;
449
450
451 align = -(long)skb->data & (128 - 1);
452 BUG_ON(align > padding);
453 skb_reserve(skb, align);
454
455
456 va = skb->data;
457
458
459 BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);
460
461#ifdef TILE_NET_PARANOIA
462#if CHIP_HAS_CBOX_HOME_MAP()
463 if (hash_default) {
464 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
465 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
466 panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
467 va, hv_pte_get_mode(pte), hv_pte_val(pte));
468 }
469#endif
470#endif
471
472
473 if (!hash_default)
474 __inv_buffer(va, len);
475
476
477
478
479 skb_reserve(skb, NET_IP_ALIGN);
480
481
482 skb_ptr = va - sizeof(*skb_ptr);
483 *skb_ptr = skb;
484
485
486 __insn_mf();
487
488
489 tile_net_provide_linux_buffer(info, va, small);
490
491 return true;
492}
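
/*
 * Provide LIPP with however many small and large buffers this cpu
 * still owes it, complaining if an allocation fails.
 */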
498static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
499{
500 while (info->num_needed_small_buffers != 0) {
501 if (!tile_net_provide_needed_buffer(info, true))
502 goto oops;
503 info->num_needed_small_buffers--;
504 }
505
506 while (info->num_needed_large_buffers != 0) {
507 if (!tile_net_provide_needed_buffer(info, false))
508 goto oops;
509 info->num_needed_large_buffers--;
510 }
511
512 return;
513
514oops:
515
516
517 pr_notice("Could not provide a linux buffer to LIPP.\n");
518}
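
/*
 * Collect up to "comps_size" completed skbs from the LEPP completion
 * ring, but only claim them if at least "min_size" are available.
 * Returns the number collected; called with "eq_lock" held.
 */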
526static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
527 struct sk_buff *comps[],
528 unsigned int comps_size,
529 unsigned int min_size)
530{
531 unsigned int n = 0;
532
533 unsigned int comp_head = eq->comp_head;
534 unsigned int comp_busy = eq->comp_busy;
535
536 while (comp_head != comp_busy && n < comps_size) {
537 comps[n++] = eq->comps[comp_head];
538 LEPP_QINC(comp_head);
539 }
540
541 if (n < min_size)
542 return 0;
543
544 eq->comp_head = comp_head;
545
546 return n;
547}
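
/*
 * Free the skbs on the LEPP completion ring (all of them if "all" is
 * true).  Returns true if completions are still pending.
 */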
553static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
554{
555 struct tile_net_priv *priv = netdev_priv(dev);
556
557 lepp_queue_t *eq = priv->eq;
558
559 struct sk_buff *olds[64];
560 unsigned int wanted = 64;
561 unsigned int i, n;
562 bool pending;
563
564 spin_lock(&priv->eq_lock);
565
566 if (all)
567 eq->comp_busy = eq->comp_tail;
568
569 n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
570
571 pending = (eq->comp_head != eq->comp_tail);
572
573 spin_unlock(&priv->eq_lock);
574
575 for (i = 0; i < n; i++)
576 kfree_skb(olds[i]);
577
578 return pending;
579}
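
/*
 * Make sure the egress timer will fire soon on this cpu; it reaps
 * egress completions that arrive after the last transmit.
 */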
588static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
589{
590 if (!info->egress_timer_scheduled) {
591 mod_timer_pinned(&info->egress_timer, jiffies + 1);
592 info->egress_timer_scheduled = true;
593 }
594}
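
/*
 * The egress timer: free any pending completions, and re-arm the timer
 * if completions are still outstanding.
 */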
611static void tile_net_handle_egress_timer(unsigned long arg)
612{
613 struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
614 struct net_device *dev = info->napi.dev;
615
616
617 info->egress_timer_scheduled = false;
618
619
620 if (tile_net_lepp_free_comps(dev, false))
621 tile_net_schedule_egress_timer(info);
622}
623
624
625static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
626{
627 struct tile_netio_queue *queue = &info->queue;
628 netio_queue_impl_t *qsp = queue->__system_part;
629 netio_queue_user_impl_t *qup = &queue->__user_part;
630
631 int index2_aux = index + sizeof(netio_pkt_t);
632 int index2 =
633 ((index2_aux ==
634 qsp->__packet_receive_queue.__last_packet_plus_one) ?
635 0 : index2_aux);
636
637 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
638
639
640 unsigned int buffer = pkt->__packet.word;
641
642
643 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
644
645
646 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
647 struct sk_buff *skb = *skb_ptr;
648
649 kfree_skb(skb);
650
651
652 qup->__packet_receive_read = index2;
653}
654
655
656
657
658
659static void tile_net_discard_packets(struct net_device *dev)
660{
661 struct tile_net_priv *priv = netdev_priv(dev);
662 int my_cpu = smp_processor_id();
663 struct tile_net_cpu *info = priv->cpu[my_cpu];
664 struct tile_netio_queue *queue = &info->queue;
665 netio_queue_impl_t *qsp = queue->__system_part;
666 netio_queue_user_impl_t *qup = &queue->__user_part;
667
668 while (qup->__packet_receive_read !=
669 qsp->__packet_receive_queue.__packet_write) {
670 int index = qup->__packet_receive_read;
671 tile_net_discard_aux(info, index);
672 }
673}
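
/*
 * Handle one ingress packet at "index": drop it if LIPP marked it bad,
 * if the device is down, or if it is not addressed to us; otherwise
 * recover the skb stashed below the buffer, fix it up, and hand it to
 * netif_receive_skb().  Returns true if the packet was accepted.
 */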
679static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
680{
681 struct net_device *dev = info->napi.dev;
682
683 struct tile_netio_queue *queue = &info->queue;
684 netio_queue_impl_t *qsp = queue->__system_part;
685 netio_queue_user_impl_t *qup = &queue->__user_part;
686 struct tile_net_stats_t *stats = &info->stats;
687
688 int filter;
689
690 int index2_aux = index + sizeof(netio_pkt_t);
691 int index2 =
692 ((index2_aux ==
693 qsp->__packet_receive_queue.__last_packet_plus_one) ?
694 0 : index2_aux);
695
696 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
697
698 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
699 netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
700
701
702
703 unsigned long len =
704 (NETIO_PKT_CUSTOM_LENGTH(pkt) +
705 NET_IP_ALIGN - NETIO_PACKET_PADDING);
706
707
708 unsigned int buffer = pkt->__packet.word;
709
710
711 bool small = ((buffer & 1) != 0);
712
713
714 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
715
716
717
718 unsigned char *buf = va + NET_IP_ALIGN;
719
720
721 if (!hash_default)
722 __inv_buffer(buf, len);
723
724#ifdef TILE_NET_DUMP_PACKETS
725 dump_packet(buf, len, "rx");
726#endif
727
728#ifdef TILE_NET_VERIFY_INGRESS
729 if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
730 dump_packet(buf, len, "rx");
731 panic("Unexpected OVERSIZE.");
732 }
733#endif
734
735 filter = 0;
736
737 if (pkt_status == NETIO_PKT_STATUS_BAD) {
738
739 filter = 2;
740 } else if (!(dev->flags & IFF_UP)) {
741
742 filter = 1;
743 } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
744 pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
745
746 filter = 2;
747 } else if (!(dev->flags & IFF_PROMISC)) {
748 if (!is_multicast_ether_addr(buf)) {
749
750 const u8 *mine = dev->dev_addr;
751 filter = !ether_addr_equal(mine, buf);
752 }
753 }
754
755 u64_stats_update_begin(&stats->syncp);
756
757 if (filter != 0) {
758
759 if (filter == 1)
760 stats->rx_dropped++;
761 else
762 stats->rx_errors++;
763
764 tile_net_provide_linux_buffer(info, va, small);
765
766 } else {
767
768
769 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
770 struct sk_buff *skb = *skb_ptr;
771
772
773 if (skb->data != buf)
774 panic("Corrupt linux buffer from LIPP! "
775 "VA=%p, skb=%p, skb->data=%p\n",
776 va, skb, skb->data);
777
778
779 skb_put(skb, len);
780
781
782 skb->protocol = eth_type_trans(skb, dev);
783
784
785 if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
786 skb->ip_summed = CHECKSUM_UNNECESSARY;
787
788 netif_receive_skb(skb);
789
790 stats->rx_packets++;
791 stats->rx_bytes += len;
792 }
793
794 u64_stats_update_end(&stats->syncp);
795
796
797
798 tile_net_return_credit(info);
799
800
801 qup->__packet_receive_read = index2;
802
803 return !filter;
804}
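
/*
 * NAPI poll handler: consume up to "budget" ingress packets, then
 * re-enable the ingress interrupt, rescheduling ourselves if more
 * packets arrived in the meantime.  Also replenishes LIPP buffers.
 */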
819static int tile_net_poll(struct napi_struct *napi, int budget)
820{
821 struct net_device *dev = napi->dev;
822 struct tile_net_priv *priv = netdev_priv(dev);
823 int my_cpu = smp_processor_id();
824 struct tile_net_cpu *info = priv->cpu[my_cpu];
825 struct tile_netio_queue *queue = &info->queue;
826 netio_queue_impl_t *qsp = queue->__system_part;
827 netio_queue_user_impl_t *qup = &queue->__user_part;
828
829 unsigned int work = 0;
830
831 if (budget <= 0)
832 goto done;
833
834 while (priv->active) {
835 int index = qup->__packet_receive_read;
836 if (index == qsp->__packet_receive_queue.__packet_write)
837 break;
838
839 if (tile_net_poll_aux(info, index)) {
840 if (++work >= budget)
841 goto done;
842 }
843 }
844
845 napi_complete(&info->napi);
846
847 if (!priv->active)
848 goto done;
849
850
851 enable_percpu_irq(priv->intr_id, 0);
852
853
854 if (qup->__packet_receive_read !=
855 qsp->__packet_receive_queue.__packet_write) {
856
857
858 (void)napi_reschedule(&info->napi);
859 }
860
861done:
862
863 if (priv->active)
864 tile_net_provide_needed_buffers(info);
865
866 return work;
867}
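
/*
 * Ingress interrupt handler: mask the per-cpu interrupt and kick NAPI,
 * which will re-enable the interrupt once the queue has been drained.
 */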
879static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
880{
881 struct net_device *dev = (struct net_device *)dev_ptr;
882 struct tile_net_priv *priv = netdev_priv(dev);
883 int my_cpu = smp_processor_id();
884 struct tile_net_cpu *info = priv->cpu[my_cpu];
885
886
887 disable_percpu_irq(priv->intr_id);
888
889
890 if (!priv->active)
891 return IRQ_HANDLED;
892
893
894
895 napi_schedule(&info->napi);
896
897 return IRQ_HANDLED;
898}
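
/*
 * One-time device initialization: re-home the LEPP queue pages onto
 * the EPP's tile, hand the queue's physical address to the hypervisor,
 * and start the LIPP/LEPP shims.  Returns 0 on success, -EIO on failure.
 */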
904static int tile_net_open_aux(struct net_device *dev)
905{
906 struct tile_net_priv *priv = netdev_priv(dev);
907
908 int ret;
909 int dummy;
910 unsigned int epp_lotar;
911
912
913
914
915 ret = hv_dev_pread(priv->hv_devhdl, 0,
916 (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
917 NETIO_EPP_SHM_OFF);
918 if (ret < 0) {
919 pr_err("could not read epp_shm_queue lotar.\n");
920 return -EIO;
921 }
922
923
924
925
926 {
927 int epp_home = hv_lotar_to_cpu(epp_lotar);
928 homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
929 }
930
931
932
933
934 {
935 netio_ipp_address_t ea = {
936 .va = 0,
937 .pa = __pa(priv->eq),
938 .pte = hv_pte(0),
939 .size = EQ_SIZE,
940 };
941 ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
942 ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
943 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
944 (HV_VirtAddr)&ea,
945 sizeof(ea),
946 NETIO_EPP_SHM_OFF);
947 if (ret < 0)
948 return -EIO;
949 }
950
951
952
953
954 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
955 sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
956 pr_warn("Failed to start LIPP/LEPP\n");
957 return -EIO;
958 }
959
960 return 0;
961}
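
/*
 * Register the current cpu with the IPP for this device, set up its
 * NetIO queue, and fetch its "fast I/O" index.  Called on each cpu.
 * On failure we simply record whether the link was down.
 */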
971static void tile_net_register(void *dev_ptr)
972{
973 struct net_device *dev = (struct net_device *)dev_ptr;
974 struct tile_net_priv *priv = netdev_priv(dev);
975 int my_cpu = smp_processor_id();
976 struct tile_net_cpu *info;
977
978 struct tile_netio_queue *queue;
979
980
981 int queue_id =
982 cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
983
984 netio_input_config_t config = {
985 .flags = 0,
986 .num_receive_packets = priv->network_cpus_credits,
987 .queue_id = queue_id
988 };
989
990 int ret = 0;
991 netio_queue_impl_t *queuep;
992
993 PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
994
995 if (!strcmp(dev->name, "xgbe0"))
996 info = this_cpu_ptr(&hv_xgbe0);
997 else if (!strcmp(dev->name, "xgbe1"))
998 info = this_cpu_ptr(&hv_xgbe1);
999 else if (!strcmp(dev->name, "gbe0"))
1000 info = this_cpu_ptr(&hv_gbe0);
1001 else if (!strcmp(dev->name, "gbe1"))
1002 info = this_cpu_ptr(&hv_gbe1);
1003 else
1004 BUG();
1005
1006
1007 init_timer(&info->egress_timer);
1008 info->egress_timer.data = (long)info;
1009 info->egress_timer.function = tile_net_handle_egress_timer;
1010
1011 u64_stats_init(&info->stats.syncp);
1012
1013 priv->cpu[my_cpu] = info;
1014
1015
1016
1017
1018
1019 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
1020 (HV_VirtAddr)&config,
1021 sizeof(netio_input_config_t),
1022 NETIO_IPP_INPUT_REGISTER_OFF);
1023 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1024 ret);
1025 if (ret < 0) {
1026 if (ret != NETIO_LINK_DOWN) {
1027 printk(KERN_DEBUG "hv_dev_pwrite "
1028 "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
1029 ret);
1030 }
1031 info->link_down = (ret == NETIO_LINK_DOWN);
1032 return;
1033 }
1034
1035
1036
1037
1038
1039 ret = hv_dev_pread(priv->hv_devhdl, 0,
1040 (HV_VirtAddr)&queuep,
1041 sizeof(netio_queue_impl_t *),
1042 NETIO_IPP_INPUT_REGISTER_OFF);
1043 PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1044 ret);
1045 PDEBUG("queuep %p\n", queuep);
1046 if (ret <= 0) {
1047
1048 pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
1049 return;
1050 }
1051
1052 queue = &info->queue;
1053
1054 queue->__system_part = queuep;
1055
1056 memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
1057
1058
1059 queue->__user_part.__receive_credit_interval = 4;
1060 queue->__user_part.__receive_credit_remaining =
1061 queue->__user_part.__receive_credit_interval;
1062
1063
1064
1065
1066
1067 ret = hv_dev_pread(priv->hv_devhdl, 0,
1068 (HV_VirtAddr)&queue->__user_part.__fastio_index,
1069 sizeof(queue->__user_part.__fastio_index),
1070 NETIO_IPP_GET_FASTIO_OFF);
1071 PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
1072
1073
1074 info->registered = true;
1075}
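
/*
 * Stop ingress on the current cpu by disabling its interrupt and
 * handing its receive credits back to the IPP.  Called on each cpu.
 */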
1086static void tile_net_deregister(void *dev_ptr)
1087{
1088 struct net_device *dev = (struct net_device *)dev_ptr;
1089 struct tile_net_priv *priv = netdev_priv(dev);
1090 int my_cpu = smp_processor_id();
1091 struct tile_net_cpu *info = priv->cpu[my_cpu];
1092
1093
1094 disable_percpu_irq(priv->intr_id);
1095
1096
1097 if (info == NULL || !info->registered)
1098 return;
1099
1100 {
1101 struct tile_netio_queue *queue = &info->queue;
1102 netio_queue_user_impl_t *qup = &queue->__user_part;
1103
1104
1105 __netio_fastio_return_credits(qup->__fastio_index, -1);
1106 }
1107}
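
/*
 * Unregister the current cpu from the IPP, discarding any packets still
 * sitting in its ingress queue and killing its egress timer.  Called on
 * each cpu.
 */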
1115static void tile_net_unregister(void *dev_ptr)
1116{
1117 struct net_device *dev = (struct net_device *)dev_ptr;
1118 struct tile_net_priv *priv = netdev_priv(dev);
1119 int my_cpu = smp_processor_id();
1120 struct tile_net_cpu *info = priv->cpu[my_cpu];
1121
1122 int ret;
1123 int dummy = 0;
1124
1125
1126 disable_percpu_irq(priv->intr_id);
1127
1128
1129 if (info == NULL || !info->registered)
1130 return;
1131
1132
1133 ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1134 sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
1135 if (ret < 0)
1136 panic("Failed to unregister with LIPP/LEPP!\n");
1137
1138
1139 tile_net_discard_packets(dev);
1140
1141
1142 info->num_needed_small_buffers = 0;
1143 info->num_needed_large_buffers = 0;
1144
1145
1146 del_timer(&info->egress_timer);
1147 info->egress_timer_scheduled = false;
1148}
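
/*
 * Shut the device down: unregister every cpu, delete their NAPI state,
 * and stop the LIPP/LEPP shims.
 */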
1157static void tile_net_stop_aux(struct net_device *dev)
1158{
1159 struct tile_net_priv *priv = netdev_priv(dev);
1160 int i;
1161
1162 int dummy = 0;
1163
1164
1165
1166
1167
1168
1169 on_each_cpu(tile_net_unregister, (void *)dev, 1);
1170 for_each_online_cpu(i) {
1171 struct tile_net_cpu *info = priv->cpu[i];
1172 if (info != NULL && info->registered) {
1173 netif_napi_del(&info->napi);
1174 info->registered = false;
1175 }
1176 }
1177
1178
1179 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1180 sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
1181 panic("Failed to stop LIPP/LEPP!\n");
1182
1183 priv->partly_opened = false;
1184}
1185
1186
1187
1188
1189
1190static void tile_net_stop_disable(void *dev_ptr)
1191{
1192 struct net_device *dev = (struct net_device *)dev_ptr;
1193 struct tile_net_priv *priv = netdev_priv(dev);
1194 int my_cpu = smp_processor_id();
1195 struct tile_net_cpu *info = priv->cpu[my_cpu];
1196
1197
1198 if (info != NULL && info->napi_enabled) {
1199 napi_disable(&info->napi);
1200 info->napi_enabled = false;
1201 }
1202}
1203
1204
1205
1206
1207
1208
1209
1210
1211static void tile_net_open_enable(void *dev_ptr)
1212{
1213 struct net_device *dev = (struct net_device *)dev_ptr;
1214 struct tile_net_priv *priv = netdev_priv(dev);
1215 int my_cpu = smp_processor_id();
1216 struct tile_net_cpu *info = priv->cpu[my_cpu];
1217
1218
1219 napi_enable(&info->napi);
1220 info->napi_enabled = true;
1221
1222
1223 enable_percpu_irq(priv->intr_id, 0);
1224}
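
/*
 * The second half of "open": register every cpu with the IPP, request
 * the ingress IRQ, hand LIPP its initial buffers, enable NAPI and
 * interrupts everywhere, activate the shim, and start the transmit
 * queue.  Returns 0 on success, a positive value if the link is down
 * (so the caller can retry later), or a negative errno.
 */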
1235static int tile_net_open_inner(struct net_device *dev)
1236{
1237 struct tile_net_priv *priv = netdev_priv(dev);
1238 int my_cpu = smp_processor_id();
1239 struct tile_net_cpu *info;
1240 struct tile_netio_queue *queue;
1241 int result = 0;
1242 int i;
1243 int dummy = 0;
1244
1245
1246
1247
1248
1249
1250 tile_net_register(dev);
1251 info = priv->cpu[my_cpu];
1252 if (!info->registered) {
1253 if (info->link_down)
1254 return 1;
1255 return -EAGAIN;
1256 }
1257
1258
1259
1260
1261
1262
1263
1264
1265 smp_call_function(tile_net_register, (void *)dev, 1);
1266 for_each_online_cpu(i) {
1267 struct tile_net_cpu *info = priv->cpu[i];
1268 if (info->registered)
1269 netif_napi_add(dev, &info->napi, tile_net_poll, 64);
1270 else
1271 result = -EAGAIN;
1272 }
1273 if (result != 0) {
1274 tile_net_stop_aux(dev);
1275 return result;
1276 }
1277
1278 queue = &info->queue;
1279
1280 if (priv->intr_id == 0) {
1281 unsigned int irq;
1282
1283
1284
1285
1286
1287
1288 priv->intr_id = queue->__system_part->__intr_id;
1289 BUG_ON(priv->intr_id == 0);
1290 irq = __ffs(priv->intr_id);
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301 tile_irq_activate(irq, TILE_IRQ_PERCPU);
1302 BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
1303 0, dev->name, (void *)dev) != 0);
1304 }
1305
1306 {
1307
1308
1309 int max_buffers =
1310 priv->network_cpus_count * priv->network_cpus_credits;
1311
1312 info->num_needed_small_buffers =
1313 min(LIPP_SMALL_BUFFERS, max_buffers);
1314
1315 info->num_needed_large_buffers =
1316 min(LIPP_LARGE_BUFFERS, max_buffers);
1317
1318 tile_net_provide_needed_buffers(info);
1319
1320 if (info->num_needed_small_buffers != 0 ||
1321 info->num_needed_large_buffers != 0)
1322 panic("Insufficient memory for buffer stack!");
1323 }
1324
1325
1326 priv->active = true;
1327
1328
1329 mb();
1330
1331
1332 on_each_cpu(tile_net_open_enable, (void *)dev, 1);
1333
1334
1335 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1336 sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
1337 panic("Failed to activate the LIPP Shim!\n");
1338
1339
1340 netif_start_queue(dev);
1341
1342 return 0;
1343}
1344
1345
1346
1347
1348
1349
1350static void tile_net_open_retry(struct work_struct *w)
1351{
1352 struct delayed_work *dw = to_delayed_work(w);
1353
1354 struct tile_net_priv *priv =
1355 container_of(dw, struct tile_net_priv, retry_work);
1356
1357
1358
1359
1360
1361
1362 if (tile_net_open_inner(priv->dev) != 0)
1363 schedule_delayed_work(&priv->retry_work,
1364 TILE_NET_RETRY_INTERVAL);
1365 else
1366 netif_carrier_on(priv->dev);
1367}
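
/*
 * The ndo_open handler.  On first use, initialize the shim and choose
 * the "network cpus" and their credits; then try tile_net_open_inner(),
 * falling back to periodic retries (with the carrier off) if the link
 * is currently down.
 */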
1384static int tile_net_open(struct net_device *dev)
1385{
1386 int ret = 0;
1387 struct tile_net_priv *priv = netdev_priv(dev);
1388
1389
1390
1391
1392
1393
1394
1395 if (!priv->partly_opened) {
1396
1397 int count;
1398 int credits;
1399
1400
1401 ret = tile_net_open_aux(dev);
1402 if (ret < 0) {
1403 pr_err("tile_net_open_aux failed: %d\n", ret);
1404 return ret;
1405 }
1406
1407
1408
1409 if (network_cpus_used)
1410 cpumask_copy(&priv->network_cpus_map,
1411 &network_cpus_map);
1412 else
1413 cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
1414
1415
1416 count = cpumask_weight(&priv->network_cpus_map);
1417
1418
1419 credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
1420
1421
1422
1423 credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
1424
1425 priv->network_cpus_count = count;
1426 priv->network_cpus_credits = credits;
1427
1428#ifdef TILE_NET_DEBUG
1429 pr_info("Using %d network cpus, with %d credits each\n",
1430 priv->network_cpus_count, priv->network_cpus_credits);
1431#endif
1432
1433 priv->partly_opened = true;
1434
1435 } else {
1436
1437
1438 }
1439
1440
1441
1442
1443 ret = tile_net_open_inner(dev);
1444 if (ret <= 0) {
1445 if (ret == 0)
1446 netif_carrier_on(dev);
1447 return ret;
1448 }
1449
1450
1451
1452
1453
1454
1455
1456 netif_carrier_off(dev);
1457 schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);
1458
1459 return 0;
1460}
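
/*
 * Ask the shim to hand back the ingress buffers it is holding, freeing
 * the corresponding skbs.  Returns the number of buffers drained.
 */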
1463static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
1464{
1465 int n = 0;
1466
1467
1468 while (true) {
1469 unsigned int buffer;
1470
1471
1472 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
1473 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
1474 break;
1475
1476
1477 if (buffer == 0)
1478 break;
1479
1480 {
1481
1482 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
1483
1484
1485 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
1486 struct sk_buff *skb = *skb_ptr;
1487
1488 kfree_skb(skb);
1489 }
1490
1491 n++;
1492 }
1493
1494 return n;
1495}
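
/*
 * The ndo_stop handler.  Mark the device inactive, deregister and then
 * disable NAPI on every cpu, drain the LIPP buffers, stop the shims,
 * free all pending egress completions, and wipe the LEPP queue.
 */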
1532static int tile_net_stop(struct net_device *dev)
1533{
1534 struct tile_net_priv *priv = netdev_priv(dev);
1535
1536 PDEBUG("tile_net_stop()\n");
1537
1538
1539 priv->active = false;
1540
1541
1542 mb();
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552 on_each_cpu(tile_net_deregister, (void *)dev, 1);
1553
1554
1555 (void)tile_net_drain_lipp_buffers(priv);
1556
1557
1558 cancel_delayed_work_sync(&priv->retry_work);
1559
1560
1561 netif_stop_queue(dev);
1562
1563
1564 on_each_cpu(tile_net_stop_disable, (void *)dev, 1);
1565
1566
1567
1568
1569
	if (tile_net_drain_lipp_buffers(priv) != 0)
		pr_warn("Had to drain some extra LIPP buffers!\n");
1572
1573
1574 tile_net_stop_aux(dev);
1575
1576
1577
1578
1579
1580
1581 while (tile_net_lepp_free_comps(dev, true))
1582 ;
1583
1584
1585 memset(priv->eq, 0, sizeof(lepp_queue_t));
1586 mb();
1587
1588 return 0;
1589}
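
/*
 * Convert the linear data and the paged fragments of an skb into LEPP
 * fragment descriptors, flushing each piece from our cache first when
 * it is not hash-for-home.  Returns the number of fragments produced.
 */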
1597static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1598 struct sk_buff *skb,
1599 void *b_data, unsigned int b_len)
1600{
1601 unsigned int i, n = 0;
1602
1603 struct skb_shared_info *sh = skb_shinfo(skb);
1604
1605 phys_addr_t cpa;
1606
1607 if (b_len != 0) {
1608
1609 if (!hash_default)
1610 finv_buffer_remote(b_data, b_len, 0);
1611
1612 cpa = __pa(b_data);
1613 frags[n].cpa_lo = cpa;
1614 frags[n].cpa_hi = cpa >> 32;
1615 frags[n].length = b_len;
1616 frags[n].hash_for_home = hash_default;
1617 n++;
1618 }
1619
1620 for (i = 0; i < sh->nr_frags; i++) {
1621
1622 skb_frag_t *f = &sh->frags[i];
1623 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1624
1625
1626
1627 int hash_for_home = hash_default;
1628
1629
1630 if (!hash_default) {
1631 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1632 BUG_ON(PageHighMem(skb_frag_page(f)));
1633 finv_buffer_remote(va, skb_frag_size(f), 0);
1634 }
1635
1636 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
1637 frags[n].cpa_lo = cpa;
1638 frags[n].cpa_hi = cpa >> 32;
1639 frags[n].length = skb_frag_size(f);
1640 frags[n].hash_for_home = hash_for_home;
1641 n++;
1642 }
1643
1644 return n;
1645}
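
/*
 * Transmit a TCP segmentation-offload packet by handing LEPP a single
 * "TSO" command carrying the prototype headers plus the payload
 * fragments, from which the shim builds the individual segments.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the egress queue is full.
 */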
1669static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1670{
1671 struct tile_net_priv *priv = netdev_priv(dev);
1672 int my_cpu = smp_processor_id();
1673 struct tile_net_cpu *info = priv->cpu[my_cpu];
1674 struct tile_net_stats_t *stats = &info->stats;
1675
1676 struct skb_shared_info *sh = skb_shinfo(skb);
1677
1678 unsigned char *data = skb->data;
1679
1680
1681 struct iphdr *ih = ip_hdr(skb);
1682 unsigned int ih_len = ih->ihl * 4;
1683
1684
1685 unsigned char *nh = skb_network_header(skb);
1686 unsigned int eh_len = nh - data;
1687
1688
1689 struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
1690 unsigned int th_len = th->doff * 4;
1691
1692
1693
1694 unsigned int sh_len = eh_len + ih_len + th_len;
1695
1696
1697
1698 unsigned int b_len = skb_headlen(skb) - sh_len;
1699
1700
1701 unsigned int d_len = b_len + skb->data_len;
1702
1703
1704 unsigned int p_len = sh->gso_size;
1705
1706
1707 unsigned int num_segs = sh->gso_segs;
1708
1709
1710 u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
1711 lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
1712
1713
1714 unsigned int num_frags =
1715 tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
1716
1717
1718 size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
1719
1720
1721 lepp_tso_cmd_t cmd_init = {
1722 .tso = true,
1723 .header_size = sh_len,
1724 .ip_offset = eh_len,
1725 .tcp_offset = eh_len + ih_len,
1726 .payload_size = p_len,
1727 .num_frags = num_frags,
1728 };
1729
1730 unsigned long irqflags;
1731
1732 lepp_queue_t *eq = priv->eq;
1733
1734 struct sk_buff *olds[8];
1735 unsigned int wanted = 8;
1736 unsigned int i, nolds = 0;
1737
1738 unsigned int cmd_head, cmd_tail, cmd_next;
1739 unsigned int comp_tail;
1740
1741
1742
1743 BUG_ON(skb->protocol != htons(ETH_P_IP));
1744 BUG_ON(ih->protocol != IPPROTO_TCP);
1745 BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
1746 BUG_ON(num_frags > LEPP_MAX_FRAGS);
1747
1748 BUG_ON(num_segs <= 1);
1749
1750
1751
1752
1753
1754 *cmd = cmd_init;
1755
1756
1757 memcpy(&cmd->frags[num_frags], data, sh_len);
1758
1759
1760
1761 prefetch_L1(&eq->comp_tail);
1762 prefetch_L1(&eq->cmd_tail);
1763 mb();
1764
1765
1766
1767
1768 spin_lock_irqsave(&priv->eq_lock, irqflags);
1769
1770
1771
1772 if (lepp_num_free_comp_slots(eq) == 0) {
1773 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
1774 if (nolds == 0) {
1775busy:
1776 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
1777 return NETDEV_TX_BUSY;
1778 }
1779 }
1780
1781 cmd_head = eq->cmd_head;
1782 cmd_tail = eq->cmd_tail;
1783
1784
1785
1786 cmd_next = cmd_tail + cmd_size;
1787 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1788 goto busy;
1789 if (cmd_next > LEPP_CMD_LIMIT) {
1790 cmd_next = 0;
1791 if (cmd_next == cmd_head)
1792 goto busy;
1793 }
1794
1795
1796 memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
1797
1798
1799 cmd_tail = cmd_next;
1800
1801
1802 comp_tail = eq->comp_tail;
1803 eq->comps[comp_tail] = skb;
1804 LEPP_QINC(comp_tail);
1805 eq->comp_tail = comp_tail;
1806
1807
1808
1809 __insn_mf();
1810
1811 eq->cmd_tail = cmd_tail;
1812
1813
1814
1815
1816 if (nolds == 0)
1817 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
1818
1819 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
1820
1821
1822 for (i = 0; i < nolds; i++)
1823 dev_consume_skb_any(olds[i]);
1824
1825
1826 u64_stats_update_begin(&stats->syncp);
1827 stats->tx_packets += num_segs;
1828 stats->tx_bytes += (num_segs * sh_len) + d_len;
1829 u64_stats_update_end(&stats->syncp);
1830
1831
1832 tile_net_schedule_egress_timer(info);
1833
1834 return NETDEV_TX_OK;
1835}
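
/*
 * The ndo_start_xmit handler.  Non-TSO packets become one LEPP command
 * per fragment (the first carrying any checksum-offload info); TSO
 * packets are punted to tile_net_tx_tso().  Completions are reaped
 * opportunistically here and by the egress timer.  Returns NETDEV_TX_OK,
 * or NETDEV_TX_BUSY if the egress queue is full.
 */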
1841static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1842{
1843 struct tile_net_priv *priv = netdev_priv(dev);
1844 int my_cpu = smp_processor_id();
1845 struct tile_net_cpu *info = priv->cpu[my_cpu];
1846 struct tile_net_stats_t *stats = &info->stats;
1847
1848 unsigned long irqflags;
1849
1850 struct skb_shared_info *sh = skb_shinfo(skb);
1851
1852 unsigned int len = skb->len;
1853 unsigned char *data = skb->data;
1854
1855 unsigned int csum_start = skb_checksum_start_offset(skb);
1856
1857 lepp_frag_t frags[1 + MAX_SKB_FRAGS];
1858
1859 unsigned int num_frags;
1860
1861 lepp_queue_t *eq = priv->eq;
1862
1863 struct sk_buff *olds[8];
1864 unsigned int wanted = 8;
1865 unsigned int i, nolds = 0;
1866
1867 unsigned int cmd_size = sizeof(lepp_cmd_t);
1868
1869 unsigned int cmd_head, cmd_tail, cmd_next;
1870 unsigned int comp_tail;
1871
1872 lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
1873
1874
1875
1876
1877
1878
1879
1880
1881 if (!info->registered)
1882 return NETDEV_TX_BUSY;
1883
1884
1885
1886 dev->trans_start = jiffies;
1887
1888
1889#ifdef TILE_NET_PARANOIA
1890#if CHIP_HAS_CBOX_HOME_MAP()
1891 if (hash_default) {
1892 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
1893 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
1894 panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
1895 data, hv_pte_get_mode(pte), hv_pte_val(pte));
1896 }
1897#endif
1898#endif
1899
1900
1901#ifdef TILE_NET_DUMP_PACKETS
1902
1903 dump_packet(data, skb_headlen(skb), "tx");
1904#endif
1905
1906
1907 if (sh->gso_size != 0)
1908 return tile_net_tx_tso(skb, dev);
1909
1910
1911
1912
1913 num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1914
1915 for (i = 0; i < num_frags; i++) {
1916
1917 bool final = (i == num_frags - 1);
1918
1919 lepp_cmd_t cmd = {
1920 .cpa_lo = frags[i].cpa_lo,
1921 .cpa_hi = frags[i].cpa_hi,
1922 .length = frags[i].length,
1923 .hash_for_home = frags[i].hash_for_home,
1924 .send_completion = final,
1925 .end_of_packet = final
1926 };
1927
1928 if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
1929 cmd.compute_checksum = 1;
1930 cmd.checksum_data.bits.start_byte = csum_start;
1931 cmd.checksum_data.bits.count = len - csum_start;
1932 cmd.checksum_data.bits.destination_byte =
1933 csum_start + skb->csum_offset;
1934 }
1935
1936 cmds[i] = cmd;
1937 }
1938
1939
1940
1941 prefetch_L1(&eq->comp_tail);
1942 prefetch_L1(&eq->cmd_tail);
1943 mb();
1944
1945
1946
1947
1948 spin_lock_irqsave(&priv->eq_lock, irqflags);
1949
1950
1951
1952 if (lepp_num_free_comp_slots(eq) == 0) {
1953 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
1954 if (nolds == 0) {
1955busy:
1956 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
1957 return NETDEV_TX_BUSY;
1958 }
1959 }
1960
1961 cmd_head = eq->cmd_head;
1962 cmd_tail = eq->cmd_tail;
1963
1964
1965
1966 for (i = 0; i < num_frags; i++) {
1967
1968
1969 cmd_next = cmd_tail + cmd_size;
1970 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1971 goto busy;
1972 if (cmd_next > LEPP_CMD_LIMIT) {
1973 cmd_next = 0;
1974 if (cmd_next == cmd_head)
1975 goto busy;
1976 }
1977
1978
1979 *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
1980
1981
1982 cmd_tail = cmd_next;
1983 }
1984
1985
1986 comp_tail = eq->comp_tail;
1987 eq->comps[comp_tail] = skb;
1988 LEPP_QINC(comp_tail);
1989 eq->comp_tail = comp_tail;
1990
1991
1992
1993 __insn_mf();
1994
1995 eq->cmd_tail = cmd_tail;
1996
1997
1998
1999
2000 if (nolds == 0)
2001 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
2002
2003 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
2004
2005
2006 for (i = 0; i < nolds; i++)
2007 dev_consume_skb_any(olds[i]);
2008
2009
2010 u64_stats_update_begin(&stats->syncp);
2011 stats->tx_packets++;
2012 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
2013 u64_stats_update_end(&stats->syncp);
2014
2015
2016 tile_net_schedule_egress_timer(info);
2017
2018 return NETDEV_TX_OK;
2019}
2020
2021
2022
2023
2024
2025static void tile_net_tx_timeout(struct net_device *dev)
2026{
2027 PDEBUG("tile_net_tx_timeout()\n");
2028 PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
2029 jiffies - dev->trans_start);
2030
2031
2032 netif_wake_queue(dev);
2033}
2034
2035
2036
2037
2038
2039static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2040{
2041 return -EOPNOTSUPP;
2042}
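
/*
 * The ndo_get_stats64 handler: sum the per-cpu counters under their
 * u64_stats syncp.
 */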
2050static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
2051 struct rtnl_link_stats64 *stats)
2052{
2053 struct tile_net_priv *priv = netdev_priv(dev);
2054 u64 rx_packets = 0, tx_packets = 0;
2055 u64 rx_bytes = 0, tx_bytes = 0;
2056 u64 rx_errors = 0, rx_dropped = 0;
2057 int i;
2058
2059 for_each_online_cpu(i) {
2060 struct tile_net_stats_t *cpu_stats;
2061 u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
2062 u64 trx_errors, trx_dropped;
2063 unsigned int start;
2064
2065 if (priv->cpu[i] == NULL)
2066 continue;
2067 cpu_stats = &priv->cpu[i]->stats;
2068
2069 do {
2070 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2071 trx_packets = cpu_stats->rx_packets;
2072 ttx_packets = cpu_stats->tx_packets;
2073 trx_bytes = cpu_stats->rx_bytes;
2074 ttx_bytes = cpu_stats->tx_bytes;
2075 trx_errors = cpu_stats->rx_errors;
2076 trx_dropped = cpu_stats->rx_dropped;
2077 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2078
2079 rx_packets += trx_packets;
2080 tx_packets += ttx_packets;
2081 rx_bytes += trx_bytes;
2082 tx_bytes += ttx_bytes;
2083 rx_errors += trx_errors;
2084 rx_dropped += trx_dropped;
2085 }
2086
2087 stats->rx_packets = rx_packets;
2088 stats->tx_packets = tx_packets;
2089 stats->rx_bytes = rx_bytes;
2090 stats->tx_bytes = tx_bytes;
2091 stats->rx_errors = rx_errors;
2092 stats->rx_dropped = rx_dropped;
2093
2094 return stats;
2095}

/*
 * Change the MTU.  The driver's receive buffers assume an MTU of at
 * most TILE_NET_MTU (1500), so larger values are rejected.
 */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
	PDEBUG("tile_net_change_mtu()\n");

	/* Check ranges. */
	if ((new_mtu < 68) || (new_mtu > TILE_NET_MTU))
		return -EINVAL;

	/* Accept the value. */
	dev->mtu = new_mtu;

	return 0;
}
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130static int tile_net_set_mac_address(struct net_device *dev, void *p)
2131{
2132 struct sockaddr *addr = p;
2133
2134 if (!is_valid_ether_addr(addr->sa_data))
2135 return -EADDRNOTAVAIL;
2136
2137
2138 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2139
2140 return 0;
2141}
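
/*
 * Open the hypervisor device backing "dev" (e.g. "xgbe0" becomes
 * "xgbe/0") and read its MAC address, falling back to a random address
 * if the shim cannot supply one.  Returns 0, or -1 if the device is
 * not configured.
 */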
2148static int tile_net_get_mac(struct net_device *dev)
2149{
2150 struct tile_net_priv *priv = netdev_priv(dev);
2151
2152 char hv_dev_name[32];
2153 int len;
2154
2155 __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
2156
2157 int ret;
2158
2159
2160 strcpy(hv_dev_name, dev->name);
2161 len = strlen(hv_dev_name);
2162
2163
2164 hv_dev_name[len] = hv_dev_name[len - 1];
2165 hv_dev_name[len - 1] = '/';
2166 len++;
2167
2168
2169 strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
2170
2171
2172 priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
2173 PDEBUG("hv_dev_open(%s) returned %d %p\n",
2174 hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
2175 if (priv->hv_devhdl < 0) {
2176 if (priv->hv_devhdl == HV_ENODEV)
2177 printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
2178 hv_dev_name);
2179 else
2180 printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
2181 hv_dev_name, priv->hv_devhdl);
2182 return -1;
2183 }
2184
2185
2186
2187
2188
2189 offset.bits.class = NETIO_PARAM;
2190 offset.bits.addr = NETIO_PARAM_MAC;
2191 ret = hv_dev_pread(priv->hv_devhdl, 0,
2192 (HV_VirtAddr)dev->dev_addr, dev->addr_len,
2193 offset.word);
2194 PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
2195 if (ret <= 0) {
2196 printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
2197 dev->name);
2198
2199
2200
2201
2202
2203 eth_hw_addr_random(dev);
2204 }
2205
2206 return 0;
2207}
2208
2209
2210#ifdef CONFIG_NET_POLL_CONTROLLER
2211
2212
2213
2214
2215
2216static void tile_net_netpoll(struct net_device *dev)
2217{
2218 struct tile_net_priv *priv = netdev_priv(dev);
2219 disable_percpu_irq(priv->intr_id);
2220 tile_net_handle_ingress_interrupt(priv->intr_id, dev);
2221 enable_percpu_irq(priv->intr_id, 0);
2222}
2223#endif
2224
2225
2226static const struct net_device_ops tile_net_ops = {
2227 .ndo_open = tile_net_open,
2228 .ndo_stop = tile_net_stop,
2229 .ndo_start_xmit = tile_net_tx,
2230 .ndo_do_ioctl = tile_net_ioctl,
2231 .ndo_get_stats64 = tile_net_get_stats64,
2232 .ndo_change_mtu = tile_net_change_mtu,
2233 .ndo_tx_timeout = tile_net_tx_timeout,
2234 .ndo_set_mac_address = tile_net_set_mac_address,
2235#ifdef CONFIG_NET_POLL_CONTROLLER
2236 .ndo_poll_controller = tile_net_netpoll,
2237#endif
2238};
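
/*
 * Initialize a tile_net device: standard Ethernet setup plus our
 * netdev_ops, watchdog, MTU, and offload features (checksum, SG, and
 * TSO when LEPP can describe a maximally-fragmented skb).
 */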
2247static void tile_net_setup(struct net_device *dev)
2248{
2249 netdev_features_t features = 0;
2250
2251 ether_setup(dev);
2252 dev->netdev_ops = &tile_net_ops;
2253 dev->watchdog_timeo = TILE_NET_TIMEOUT;
2254 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
2255 dev->mtu = TILE_NET_MTU;
2256
2257 features |= NETIF_F_HW_CSUM;
2258 features |= NETIF_F_SG;
2259
2260
2261 if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
2262 features |= NETIF_F_TSO;
2263
2264
2265
2266
2267 if (hash_default)
2268 features |= NETIF_F_HIGHDMA;
2269
2270 dev->hw_features |= features;
2271 dev->vlan_features |= features;
2272 dev->features |= features;
2273}
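
/*
 * Allocate and register a network device for the hypervisor interface
 * named "name", including its LEPP egress queue.  Returns NULL if the
 * device does not exist or cannot be initialized.
 */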
2280static struct net_device *tile_net_dev_init(const char *name)
2281{
2282 int ret;
2283 struct net_device *dev;
2284 struct tile_net_priv *priv;
2285
2286
2287
2288
2289
2290
2291 dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
2292 tile_net_setup);
2293 if (!dev) {
2294 pr_err("alloc_netdev(%s) failed\n", name);
2295 return NULL;
2296 }
2297
2298 priv = netdev_priv(dev);
2299
2300
2301
2302 memset(priv, 0, sizeof(*priv));
2303
2304
2305 priv->dev = dev;
2306
2307 INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
2308
2309 spin_lock_init(&priv->eq_lock);
2310
2311
2312 priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
2313 if (!priv->eq_pages) {
2314 free_netdev(dev);
2315 return NULL;
2316 }
2317 priv->eq = page_address(priv->eq_pages);
2318
2319
2320 ret = register_netdev(dev);
2321 if (ret) {
2322 pr_err("register_netdev %s failed %d\n", dev->name, ret);
2323 __free_pages(priv->eq_pages, EQ_ORDER);
2324 free_netdev(dev);
2325 return NULL;
2326 }
2327
2328
2329 ret = tile_net_get_mac(dev);
2330 if (ret < 0) {
2331 unregister_netdev(dev);
2332 __free_pages(priv->eq_pages, EQ_ORDER);
2333 free_netdev(dev);
2334 return NULL;
2335 }
2336
2337 return dev;
2338}
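
/* Module cleanup: unregister the devices and release their egress queues. */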
2347static void tile_net_cleanup(void)
2348{
2349 int i;
2350
2351 for (i = 0; i < TILE_NET_DEVS; i++) {
2352 if (tile_net_devs[i]) {
2353 struct net_device *dev = tile_net_devs[i];
2354 struct tile_net_priv *priv = netdev_priv(dev);
2355 unregister_netdev(dev);
2356 finv_buffer_remote(priv->eq, EQ_SIZE, 0);
2357 __free_pages(priv->eq_pages, EQ_ORDER);
2358 free_netdev(dev);
2359 }
2360 }
2361}
2362
2363
2364
2365
2366
2367static int tile_net_init_module(void)
2368{
2369 pr_info("Tilera Network Driver\n");
2370
2371 tile_net_devs[0] = tile_net_dev_init("xgbe0");
2372 tile_net_devs[1] = tile_net_dev_init("xgbe1");
2373 tile_net_devs[2] = tile_net_dev_init("gbe0");
2374 tile_net_devs[3] = tile_net_dev_init("gbe1");
2375
2376 return 0;
2377}
2378
2379
2380module_init(tile_net_init_module);
2381module_exit(tile_net_cleanup);
2382
2383
2384#ifndef MODULE
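
/*
 * Parse the "network_cpus=" boot argument, which restricts ingress
 * processing to the named cpus.
 */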
2394static int __init network_cpus_setup(char *str)
2395{
2396 int rc = cpulist_parse_crop(str, &network_cpus_map);
2397 if (rc != 0) {
2398 pr_warn("network_cpus=%s: malformed cpu list\n", str);
2399 } else {
2400
2401
2402 cpumask_and(&network_cpus_map, &network_cpus_map,
2403 cpu_possible_mask);
2404
2405
2406 if (cpumask_empty(&network_cpus_map)) {
2407 pr_warn("Ignoring network_cpus='%s'\n", str);
2408 } else {
2409 pr_info("Linux network CPUs: %*pbl\n",
2410 cpumask_pr_args(&network_cpus_map));
2411 network_cpus_used = true;
2412 }
2413 }
2414
2415 return 0;
2416}
2417__setup("network_cpus=", network_cpus_setup);
2418
2419#endif
2420