#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

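/* Switch the data path between the synthetic NIC and the VF (SR-IOV)
 * interface by sending an NVSP_MSG4_TYPE_SWITCH_DATA_PATH message to
 * the host.
 */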
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	trace_nvsp_send(ndev, init_pkt);

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

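/* Work item that finishes sub-channel setup outside the channel
 * callback. If the RTNL lock cannot be taken yet, the work is simply
 * rescheduled; on failure the device falls back to a single channel.
 */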
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

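/* RCU callback that releases all per-device allocations (receive and
 * send buffers, the send-section bitmap and per-channel completion
 * slots) once no readers can still reference the netvsc_device.
 */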
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

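/* Tell the host to stop using the receive buffer. This is only needed
 * when a SendReceiveBufferComplete message was received earlier, i.e.
 * recv_section_cnt is non-zero. A rescinded channel is treated as
 * success since nothing can be sent on it anyway.
 */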
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	if (net_device->recv_section_cnt) {
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		if (device->channel->rescind)
			ret = 0;

		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	if (net_device->send_section_cnt) {
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		if (device->channel->rescind)
			ret = 0;

		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

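/* Allocate the per-channel ring of pending receive completions,
 * preferring memory local to the CPU that services the channel.
 */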
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

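/* Allocate the receive and send buffers, establish their GPADLs with
 * the host and exchange the NVSP messages that describe how each
 * buffer is divided into sections. On any failure everything set up
 * so far is revoked and torn down again.
 */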
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	if (net_device->recv_section_size < NETVSC_MTU_MIN ||
	    (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

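/* Try to negotiate exactly one NVSP protocol version with the host.
 * For NVSP 2 and later a SEND_NDIS_CONFIG message follows, advertising
 * MTU, 802.1Q, SR-IOV/teaming and RSC capabilities as appropriate.
 */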
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

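/* Negotiate the highest NVSP version both ends support, tell the host
 * which NDIS version the guest will use and then set up the shared
 * buffers via netvsc_init_buf().
 */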
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

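/* Tear down the netvsc device: revoke and unmap the shared buffers in
 * the order required by the host version, stop NAPI on all channels,
 * close the VMBus channel and finally free the device via RCU.
 */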
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	for (i = 0; i < net_device->num_chn; i++) {
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	netdev_dbg(ndev, "net device safe to remove\n");

	vmbus_close(device->channel);

	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	free_netvsc_device_rcu(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

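/* Handle an RNDIS packet send completion: release the send-buffer
 * slot, account TX statistics, free the skb and wake the queue once
 * enough ring space is available again.
 */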
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

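/* Dispatch a VM_PKT_COMP packet. Control-message completions are
 * length-checked and copied into channel_init_pkt so the waiter in
 * negotiate_nvsp_ver()/netvsc_init_buf() can proceed; RNDIS packet
 * completions go to netvsc_send_tx_complete().
 */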
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}

		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

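/* Atomically claim a free section in the send buffer bitmap, or return
 * NETVSC_INVALID_INDEX if all sections are in use.
 */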
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

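/* Copy the packet's page fragments into the chosen send-buffer section
 * at offset pend_size. When more packets are expected (xmit_more) the
 * data is padded up to the RNDIS packet alignment so the next packet
 * can be appended to the same section.
 */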
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

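/* Build the NVSP_MSG1_TYPE_SEND_RNDIS_PKT message and hand the packet
 * to VMBus, either with a page-buffer list or purely inband. The TX
 * queue is stopped when the ring runs low and re-woken here if no
 * other sends remain outstanding.
 */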
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;
	else
		rpkt->channel_type = 1;

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}

static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

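/* Main transmit path. Small packets are batched into a pending
 * send-buffer section per channel while xmit_more is set; larger
 * packets, XDP transmits and control messages (no skb) are sent
 * directly through netvsc_send_pkt().
 */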
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

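/* Flush all queued receive completions for a channel back to the host
 * as VM_PKT_COMP packets. Returns non-zero if the ring is full, in
 * which case the remaining completions stay queued for the next pass.
 */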
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

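/* Queue a receive completion for later delivery, flushing the ring to
 * the host first if it has grown beyond NAPI_POLL_WEIGHT entries.
 */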
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

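/* Validate and process a VM_PKT_DATA_USING_XFER_PAGES packet: each
 * range must lie within the receive buffer, and every RNDIS message it
 * carries is handed to rndis_filter_receive(). A completion with the
 * overall status is queued at the end. Returns the number of ranges
 * processed, which counts toward the NAPI budget.
 */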
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS))
			status = NVSP_STAT_FAIL;
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

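/* Parse the send indirection table message from the host and copy the
 * queue mapping into tx_table. Hosts speaking NVSP 6.0 or older may
 * report a wrong offset, so it is recomputed when the message is large
 * enough to hold the table at the expected location.
 */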
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	if (msglen < sizeof(struct nvsp_message_header) +
	    sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	if (offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg,
			   u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	if (msglen < sizeof(struct nvsp_message_header) +
	    sizeof(struct nvsp_4_send_vf_association)) {
		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
		return;
	}

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

static void netvsc_receive_inband(struct net_device *ndev,
				  struct netvsc_device *nvscdev,
				  const struct vmpacket_descriptor *desc)
{
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(ndev, nvmsg, msglen);
		break;
	}
}

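/* Demultiplex one VMBus packet by descriptor type: completions, data
 * carried in transfer pages (RNDIS receive) and inband control
 * messages. Only the receive path contributes to the NAPI work count.
 */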
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel, desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan, desc);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, desc);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

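/* NAPI poll handler. Packets are processed up to the budget, queued
 * receive completions are flushed, and polling is re-armed only if the
 * budget was not exhausted and the ring still has data (or the
 * completion flush needs to be retried).
 */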
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	ret = send_recv_completions(ndev, net_device, nvchan);

	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	return min(work_done, budget);
}

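/* VMBus channel callback (interrupt context). Prefetches the ring
 * read position and defers all processing to NAPI.
 */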
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

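/* Allocate a netvsc_device, initialise per-channel state and XDP RX
 * info, open the primary VMBus channel, enable NAPI and connect to the
 * NetVSP protocol. On success the new device is published through
 * net_device_ctx->nvdev; on failure everything is unwound in reverse.
 */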
struct netvsc_device *netvsc_device_add(struct hv_device *device,
					const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	set_channel_read_mode(device->channel, HV_CALL_ISR);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}