// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Microsoft Corporation. */

4#include <linux/inetdevice.h>
5#include <linux/etherdevice.h>
6#include <linux/ethtool.h>
7#include <linux/mm.h>
8
9#include <net/checksum.h>
10#include <net/ip6_checksum.h>
11
12#include "mana.h"
13
14
15
16static int mana_open(struct net_device *ndev)
17{
18 struct mana_port_context *apc = netdev_priv(ndev);
19 int err;
20
21 err = mana_alloc_queues(ndev);
22 if (err)
23 return err;
24
25 apc->port_is_up = true;
26
27
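	/* Ensure port_is_up is updated before the TX queues are started */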
28 smp_wmb();
29
30 netif_carrier_on(ndev);
31 netif_tx_wake_all_queues(ndev);
32
33 return 0;
34}
35
36static int mana_close(struct net_device *ndev)
37{
38 struct mana_port_context *apc = netdev_priv(ndev);
39
40 if (!apc->port_is_up)
41 return 0;
42
43 return mana_detach(ndev, true);
44}
45
46static bool mana_can_tx(struct gdma_queue *wq)
47{
48 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
49}
50
51static unsigned int mana_checksum_info(struct sk_buff *skb)
52{
53 if (skb->protocol == htons(ETH_P_IP)) {
54 struct iphdr *ip = ip_hdr(skb);
55
56 if (ip->protocol == IPPROTO_TCP)
57 return IPPROTO_TCP;
58
59 if (ip->protocol == IPPROTO_UDP)
60 return IPPROTO_UDP;
61 } else if (skb->protocol == htons(ETH_P_IPV6)) {
62 struct ipv6hdr *ip6 = ipv6_hdr(skb);
63
64 if (ip6->nexthdr == IPPROTO_TCP)
65 return IPPROTO_TCP;
66
67 if (ip6->nexthdr == IPPROTO_UDP)
68 return IPPROTO_UDP;
69 }
70
71
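	/* No checksum offload for other protocols */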
72 return 0;
73}
74
75static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
76 struct mana_tx_package *tp)
77{
78 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
79 struct gdma_dev *gd = apc->ac->gdma_dev;
80 struct gdma_context *gc;
81 struct device *dev;
82 skb_frag_t *frag;
83 dma_addr_t da;
84 int i;
85
86 gc = gd->gdma_context;
87 dev = gc->dev;
88 da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
89
90 if (dma_mapping_error(dev, da))
91 return -ENOMEM;
92
93 ash->dma_handle[0] = da;
94 ash->size[0] = skb_headlen(skb);
95
96 tp->wqe_req.sgl[0].address = ash->dma_handle[0];
97 tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
98 tp->wqe_req.sgl[0].size = ash->size[0];
99
100 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
101 frag = &skb_shinfo(skb)->frags[i];
102 da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
103 DMA_TO_DEVICE);
104
105 if (dma_mapping_error(dev, da))
106 goto frag_err;
107
108 ash->dma_handle[i + 1] = da;
109 ash->size[i + 1] = skb_frag_size(frag);
110
111 tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
112 tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
113 tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
114 }
115
116 return 0;
117
118frag_err:
119 for (i = i - 1; i >= 0; i--)
120 dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
121 DMA_TO_DEVICE);
122
123 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
124
125 return -ENOMEM;
126}
127
128static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
129{
130 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
131 struct mana_port_context *apc = netdev_priv(ndev);
132 u16 txq_idx = skb_get_queue_mapping(skb);
133 struct gdma_dev *gd = apc->ac->gdma_dev;
134 bool ipv4 = false, ipv6 = false;
135 struct mana_tx_package pkg = {};
136 struct netdev_queue *net_txq;
137 struct mana_stats *tx_stats;
138 struct gdma_queue *gdma_sq;
139 unsigned int csum_type;
140 struct mana_txq *txq;
141 struct mana_cq *cq;
142 int err, len;
143
144 if (unlikely(!apc->port_is_up))
145 goto tx_drop;
146
147 if (skb_cow_head(skb, MANA_HEADROOM))
148 goto tx_drop_count;
149
150 txq = &apc->tx_qp[txq_idx].txq;
151 gdma_sq = txq->gdma_sq;
152 cq = &apc->tx_qp[txq_idx].tx_cq;
153
154 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
155 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
156
157 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
158 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
159 pkt_fmt = MANA_LONG_PKT_FMT;
160 } else {
161 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
162 }
163
164 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
165
166 if (pkt_fmt == MANA_SHORT_PKT_FMT)
167 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
168 else
169 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
170
171 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
172 pkg.wqe_req.flags = 0;
173 pkg.wqe_req.client_data_unit = 0;
174
175 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
176 WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
177
178 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
179 pkg.wqe_req.sgl = pkg.sgl_array;
180 } else {
181 pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
182 sizeof(struct gdma_sge),
183 GFP_ATOMIC);
184 if (!pkg.sgl_ptr)
185 goto tx_drop_count;
186
187 pkg.wqe_req.sgl = pkg.sgl_ptr;
188 }
189
190 if (skb->protocol == htons(ETH_P_IP))
191 ipv4 = true;
192 else if (skb->protocol == htons(ETH_P_IPV6))
193 ipv6 = true;
194
195 if (skb_is_gso(skb)) {
196 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
197 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
198
199 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
200 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
201 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
202
203 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
204 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
205 if (ipv4) {
206 ip_hdr(skb)->tot_len = 0;
207 ip_hdr(skb)->check = 0;
208 tcp_hdr(skb)->check =
209 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
210 ip_hdr(skb)->daddr, 0,
211 IPPROTO_TCP, 0);
212 } else {
213 ipv6_hdr(skb)->payload_len = 0;
214 tcp_hdr(skb)->check =
215 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
216 &ipv6_hdr(skb)->daddr, 0,
217 IPPROTO_TCP, 0);
218 }
219 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
220 csum_type = mana_checksum_info(skb);
221
222 if (csum_type == IPPROTO_TCP) {
223 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
224 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
225
226 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
227 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
228
229 } else if (csum_type == IPPROTO_UDP) {
230 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
231 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
232
233 pkg.tx_oob.s_oob.comp_udp_csum = 1;
234 } else {
235
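			/* Can't offload this checksum type; fall back to
			 * software checksumming.
			 */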
236 if (skb_checksum_help(skb))
237 goto free_sgl_ptr;
238 }
239 }
240
241 if (mana_map_skb(skb, apc, &pkg))
242 goto free_sgl_ptr;
243
244 skb_queue_tail(&txq->pending_skbs, skb);
245
246 len = skb->len;
247 net_txq = netdev_get_tx_queue(ndev, txq_idx);
248
249 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
250 (struct gdma_posted_wqe_info *)skb->cb);
251 if (!mana_can_tx(gdma_sq)) {
252 netif_tx_stop_queue(net_txq);
253 apc->eth_stats.stop_queue++;
254 }
255
256 if (err) {
257 (void)skb_dequeue_tail(&txq->pending_skbs);
258 netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
259 err = NETDEV_TX_BUSY;
260 goto tx_busy;
261 }
262
263 err = NETDEV_TX_OK;
264 atomic_inc(&txq->pending_sends);
265
266 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
267
268
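	/* The skb now belongs to the completion path (mana_poll_tx_cq) and
	 * may be freed at any time; don't touch it below.
	 */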
269 skb = NULL;
270
271 tx_stats = &txq->stats;
272 u64_stats_update_begin(&tx_stats->syncp);
273 tx_stats->packets++;
274 tx_stats->bytes += len;
275 u64_stats_update_end(&tx_stats->syncp);
276
277tx_busy:
278 if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
279 netif_tx_wake_queue(net_txq);
280 apc->eth_stats.wake_queue++;
281 }
282
283 kfree(pkg.sgl_ptr);
284 return err;
285
286free_sgl_ptr:
287 kfree(pkg.sgl_ptr);
288tx_drop_count:
289 ndev->stats.tx_dropped++;
290tx_drop:
291 dev_kfree_skb_any(skb);
292 return NETDEV_TX_OK;
293}
294
295static void mana_get_stats64(struct net_device *ndev,
296 struct rtnl_link_stats64 *st)
297{
298 struct mana_port_context *apc = netdev_priv(ndev);
299 unsigned int num_queues = apc->num_queues;
300 struct mana_stats *stats;
301 unsigned int start;
302 u64 packets, bytes;
303 int q;
304
305 if (!apc->port_is_up)
306 return;
307
308 netdev_stats_to_stats64(st, &ndev->stats);
309
310 for (q = 0; q < num_queues; q++) {
311 stats = &apc->rxqs[q]->stats;
312
313 do {
314 start = u64_stats_fetch_begin_irq(&stats->syncp);
315 packets = stats->packets;
316 bytes = stats->bytes;
317 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
318
319 st->rx_packets += packets;
320 st->rx_bytes += bytes;
321 }
322
323 for (q = 0; q < num_queues; q++) {
324 stats = &apc->tx_qp[q].txq.stats;
325
326 do {
327 start = u64_stats_fetch_begin_irq(&stats->syncp);
328 packets = stats->packets;
329 bytes = stats->bytes;
330 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
331
332 st->tx_packets += packets;
333 st->tx_bytes += bytes;
334 }
335}
336
337static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
338 int old_q)
339{
340 struct mana_port_context *apc = netdev_priv(ndev);
341 u32 hash = skb_get_hash(skb);
342 struct sock *sk = skb->sk;
343 int txq;
344
345 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
346
347 if (txq != old_q && sk && sk_fullsock(sk) &&
348 rcu_access_pointer(sk->sk_dst_cache))
349 sk_tx_queue_set(sk, txq);
350
351 return txq;
352}
353
static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
356{
357 int txq;
358
359 if (ndev->real_num_tx_queues == 1)
360 return 0;
361
362 txq = sk_tx_queue_get(skb->sk);
363
364 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
365 if (skb_rx_queue_recorded(skb))
366 txq = skb_get_rx_queue(skb);
367 else
368 txq = mana_get_tx_queue(ndev, skb, txq);
369 }
370
371 return txq;
372}
373
374static const struct net_device_ops mana_devops = {
375 .ndo_open = mana_open,
376 .ndo_stop = mana_close,
377 .ndo_select_queue = mana_select_queue,
378 .ndo_start_xmit = mana_start_xmit,
379 .ndo_validate_addr = eth_validate_addr,
380 .ndo_get_stats64 = mana_get_stats64,
381};
382
383static void mana_cleanup_port_context(struct mana_port_context *apc)
384{
385 kfree(apc->rxqs);
386 apc->rxqs = NULL;
387}
388
389static int mana_init_port_context(struct mana_port_context *apc)
390{
391 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
392 GFP_KERNEL);
393
394 return !apc->rxqs ? -ENOMEM : 0;
395}
396
397static int mana_send_request(struct mana_context *ac, void *in_buf,
398 u32 in_len, void *out_buf, u32 out_len)
399{
400 struct gdma_context *gc = ac->gdma_dev->gdma_context;
401 struct gdma_resp_hdr *resp = out_buf;
402 struct gdma_req_hdr *req = in_buf;
403 struct device *dev = gc->dev;
404 static atomic_t activity_id;
405 int err;
406
407 req->dev_id = gc->mana.dev_id;
408 req->activity_id = atomic_inc_return(&activity_id);
409
410 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
411 out_buf);
412 if (err || resp->status) {
413 dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
414 err, resp->status);
415 return err ? err : -EPROTO;
416 }
417
418 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
419 req->activity_id != resp->activity_id) {
420 dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
421 req->dev_id.as_uint32, resp->dev_id.as_uint32,
422 req->activity_id, resp->activity_id);
423 return -EPROTO;
424 }
425
426 return 0;
427}
428
429static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
430 const enum mana_command_code expected_code,
431 const u32 min_size)
432{
433 if (resp_hdr->response.msg_type != expected_code)
434 return -EPROTO;
435
436 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
437 return -EPROTO;
438
439 if (resp_hdr->response.msg_size < min_size)
440 return -EPROTO;
441
442 return 0;
443}
444
445static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
446 u32 proto_minor_ver, u32 proto_micro_ver,
447 u16 *max_num_vports)
448{
449 struct gdma_context *gc = ac->gdma_dev->gdma_context;
450 struct mana_query_device_cfg_resp resp = {};
451 struct mana_query_device_cfg_req req = {};
452 struct device *dev = gc->dev;
453 int err = 0;
454
455 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
456 sizeof(req), sizeof(resp));
457 req.proto_major_ver = proto_major_ver;
458 req.proto_minor_ver = proto_minor_ver;
459 req.proto_micro_ver = proto_micro_ver;
460
461 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
462 if (err) {
		dev_err(dev, "Failed to query config: %d\n", err);
464 return err;
465 }
466
467 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
468 sizeof(resp));
469 if (err || resp.hdr.status) {
470 dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
471 resp.hdr.status);
472 if (!err)
473 err = -EPROTO;
474 return err;
475 }
476
477 *max_num_vports = resp.max_num_vports;
478
479 return 0;
480}
481
482static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
483 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
484{
485 struct mana_query_vport_cfg_resp resp = {};
486 struct mana_query_vport_cfg_req req = {};
487 int err;
488
489 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
490 sizeof(req), sizeof(resp));
491
492 req.vport_index = vport_index;
493
494 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
495 sizeof(resp));
496 if (err)
497 return err;
498
499 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
500 sizeof(resp));
501 if (err)
502 return err;
503
504 if (resp.hdr.status)
505 return -EPROTO;
506
507 *max_sq = resp.max_num_sq;
508 *max_rq = resp.max_num_rq;
509 *num_indir_entry = resp.num_indirection_ent;
510
511 apc->port_handle = resp.vport;
512 ether_addr_copy(apc->mac_addr, resp.mac_addr);
513
514 return 0;
515}
516
517static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
518 u32 doorbell_pg_id)
519{
520 struct mana_config_vport_resp resp = {};
521 struct mana_config_vport_req req = {};
522 int err;
523
524 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
525 sizeof(req), sizeof(resp));
526 req.vport = apc->port_handle;
527 req.pdid = protection_dom_id;
528 req.doorbell_pageid = doorbell_pg_id;
529
530 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
531 sizeof(resp));
532 if (err) {
533 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
534 goto out;
535 }
536
537 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
538 sizeof(resp));
539 if (err || resp.hdr.status) {
540 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
541 err, resp.hdr.status);
542 if (!err)
543 err = -EPROTO;
544
545 goto out;
546 }
547
548 apc->tx_shortform_allowed = resp.short_form_allowed;
549 apc->tx_vp_offset = resp.tx_vport_offset;
550out:
551 return err;
552}
553
554static int mana_cfg_vport_steering(struct mana_port_context *apc,
555 enum TRI_STATE rx,
556 bool update_default_rxobj, bool update_key,
557 bool update_tab)
558{
559 u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
560 struct mana_cfg_rx_steer_req *req = NULL;
561 struct mana_cfg_rx_steer_resp resp = {};
562 struct net_device *ndev = apc->ndev;
563 mana_handle_t *req_indir_tab;
564 u32 req_buf_size;
565 int err;
566
567 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
568 req = kzalloc(req_buf_size, GFP_KERNEL);
569 if (!req)
570 return -ENOMEM;
571
572 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
573 sizeof(resp));
574
575 req->vport = apc->port_handle;
576 req->num_indir_entries = num_entries;
577 req->indir_tab_offset = sizeof(*req);
578 req->rx_enable = rx;
579 req->rss_enable = apc->rss_state;
580 req->update_default_rxobj = update_default_rxobj;
581 req->update_hashkey = update_key;
582 req->update_indir_tab = update_tab;
583 req->default_rxobj = apc->default_rxobj;
584
585 if (update_key)
586 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
587
588 if (update_tab) {
589 req_indir_tab = (mana_handle_t *)(req + 1);
590 memcpy(req_indir_tab, apc->rxobj_table,
591 req->num_indir_entries * sizeof(mana_handle_t));
592 }
593
594 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
595 sizeof(resp));
596 if (err) {
597 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
598 goto out;
599 }
600
601 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
602 sizeof(resp));
603 if (err) {
604 netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
605 goto out;
606 }
607
608 if (resp.hdr.status) {
609 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
610 resp.hdr.status);
611 err = -EPROTO;
612 }
613out:
614 kfree(req);
615 return err;
616}
617
618static int mana_create_wq_obj(struct mana_port_context *apc,
619 mana_handle_t vport,
620 u32 wq_type, struct mana_obj_spec *wq_spec,
621 struct mana_obj_spec *cq_spec,
622 mana_handle_t *wq_obj)
623{
624 struct mana_create_wqobj_resp resp = {};
625 struct mana_create_wqobj_req req = {};
626 struct net_device *ndev = apc->ndev;
627 int err;
628
629 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
630 sizeof(req), sizeof(resp));
631 req.vport = vport;
632 req.wq_type = wq_type;
633 req.wq_gdma_region = wq_spec->gdma_region;
634 req.cq_gdma_region = cq_spec->gdma_region;
635 req.wq_size = wq_spec->queue_size;
636 req.cq_size = cq_spec->queue_size;
637 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
638 req.cq_parent_qid = cq_spec->attached_eq;
639
640 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
641 sizeof(resp));
642 if (err) {
643 netdev_err(ndev, "Failed to create WQ object: %d\n", err);
644 goto out;
645 }
646
647 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
648 sizeof(resp));
649 if (err || resp.hdr.status) {
650 netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
651 resp.hdr.status);
652 if (!err)
653 err = -EPROTO;
654 goto out;
655 }
656
657 if (resp.wq_obj == INVALID_MANA_HANDLE) {
658 netdev_err(ndev, "Got an invalid WQ object handle\n");
659 err = -EPROTO;
660 goto out;
661 }
662
663 *wq_obj = resp.wq_obj;
664 wq_spec->queue_index = resp.wq_id;
665 cq_spec->queue_index = resp.cq_id;
666
667 return 0;
668out:
669 return err;
670}
671
672static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
673 mana_handle_t wq_obj)
674{
675 struct mana_destroy_wqobj_resp resp = {};
676 struct mana_destroy_wqobj_req req = {};
677 struct net_device *ndev = apc->ndev;
678 int err;
679
680 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
681 sizeof(req), sizeof(resp));
682 req.wq_type = wq_type;
683 req.wq_obj_handle = wq_obj;
684
685 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
686 sizeof(resp));
687 if (err) {
688 netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
689 return;
690 }
691
692 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
693 sizeof(resp));
694 if (err || resp.hdr.status)
695 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
696 resp.hdr.status);
697}
698
699static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
700{
701 int i;
702
703 for (i = 0; i < CQE_POLLING_BUFFER; i++)
704 memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
705}
706
707static void mana_destroy_eq(struct gdma_context *gc,
708 struct mana_port_context *apc)
709{
710 struct gdma_queue *eq;
711 int i;
712
713 if (!apc->eqs)
714 return;
715
716 for (i = 0; i < apc->num_queues; i++) {
717 eq = apc->eqs[i].eq;
718 if (!eq)
719 continue;
720
721 mana_gd_destroy_queue(gc, eq);
722 }
723
724 kfree(apc->eqs);
725 apc->eqs = NULL;
726}
727
728static int mana_create_eq(struct mana_port_context *apc)
729{
730 struct gdma_dev *gd = apc->ac->gdma_dev;
731 struct gdma_queue_spec spec = {};
732 int err;
733 int i;
734
735 apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq),
736 GFP_KERNEL);
737 if (!apc->eqs)
738 return -ENOMEM;
739
740 spec.type = GDMA_EQ;
741 spec.monitor_avl_buf = false;
742 spec.queue_size = EQ_SIZE;
743 spec.eq.callback = NULL;
744 spec.eq.context = apc->eqs;
745 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
746 spec.eq.ndev = apc->ndev;
747
748 for (i = 0; i < apc->num_queues; i++) {
749 mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);
750
751 err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
752 if (err)
753 goto out;
754 }
755
756 return 0;
757out:
758 mana_destroy_eq(gd->gdma_context, apc);
759 return err;
760}
761
762static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
763{
764 u32 used_space_old;
765 u32 used_space_new;
766
767 used_space_old = wq->head - wq->tail;
768 used_space_new = wq->head - (wq->tail + num_units);
769
770 if (WARN_ON_ONCE(used_space_new > used_space_old))
771 return -ERANGE;
772
773 wq->tail += num_units;
774 return 0;
775}
776
777static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
778{
779 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
780 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
781 struct device *dev = gc->dev;
782 int i;
783
784 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
785
786 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
787 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
788 DMA_TO_DEVICE);
789}
790
791static void mana_poll_tx_cq(struct mana_cq *cq)
792{
793 struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent;
794 struct gdma_comp *completions = cq->gdma_comp_buf;
795 struct gdma_posted_wqe_info *wqe_info;
796 unsigned int pkt_transmitted = 0;
797 unsigned int wqe_unit_cnt = 0;
798 struct mana_txq *txq = cq->txq;
799 struct mana_port_context *apc;
800 struct netdev_queue *net_txq;
801 struct gdma_queue *gdma_wq;
802 unsigned int avail_space;
803 struct net_device *ndev;
804 struct sk_buff *skb;
805 bool txq_stopped;
806 int comp_read;
807 int i;
808
809 ndev = txq->ndev;
810 apc = netdev_priv(ndev);
811
812 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
813 CQE_POLLING_BUFFER);
814
815 for (i = 0; i < comp_read; i++) {
816 struct mana_tx_comp_oob *cqe_oob;
817
818 if (WARN_ON_ONCE(!completions[i].is_sq))
819 return;
820
821 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
822 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
823 MANA_CQE_COMPLETION))
824 return;
825
826 switch (cqe_oob->cqe_hdr.cqe_type) {
827 case CQE_TX_OKAY:
828 break;
829
830 case CQE_TX_SA_DROP:
831 case CQE_TX_MTU_DROP:
832 case CQE_TX_INVALID_OOB:
833 case CQE_TX_INVALID_ETH_TYPE:
834 case CQE_TX_HDR_PROCESSING_ERROR:
835 case CQE_TX_VF_DISABLED:
836 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
837 case CQE_TX_VPORT_DISABLED:
838 case CQE_TX_VLAN_TAGGING_VIOLATION:
839 WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
840 cqe_oob->cqe_hdr.cqe_type);
841 break;
842
843 default:
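			/* An unknown CQE type most likely indicates a HW or
			 * FW problem; warn and stop processing this CQ.
			 */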
847 WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
848 cqe_oob->cqe_hdr.cqe_type);
849 return;
850 }
851
852 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
853 return;
854
855 skb = skb_dequeue(&txq->pending_skbs);
856 if (WARN_ON_ONCE(!skb))
857 return;
858
859 wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
860 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
861
862 mana_unmap_skb(skb, apc);
863
864 napi_consume_skb(skb, gdma_eq->eq.budget);
865
866 pkt_transmitted++;
867 }
868
869 if (WARN_ON_ONCE(wqe_unit_cnt == 0))
870 return;
871
872 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
873
874 gdma_wq = txq->gdma_sq;
875 avail_space = mana_gd_wq_avail_space(gdma_wq);
876
877
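	/* Ensure the tail update above is visible before checking the
	 * queue-stopped state.
	 */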
878 smp_mb();
879
880 net_txq = txq->net_txq;
881 txq_stopped = netif_tx_queue_stopped(net_txq);
882
883
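	/* Ensure txq_stopped is read before apc->port_is_up */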
884 smp_rmb();
885
886 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
887 netif_tx_wake_queue(net_txq);
888 apc->eth_stats.wake_queue++;
889 }
890
891 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
892 WARN_ON_ONCE(1);
893}
894
895static void mana_post_pkt_rxq(struct mana_rxq *rxq)
896{
897 struct mana_recv_buf_oob *recv_buf_oob;
898 u32 curr_index;
899 int err;
900
901 curr_index = rxq->buf_index++;
902 if (rxq->buf_index == rxq->num_rx_buf)
903 rxq->buf_index = 0;
904
905 recv_buf_oob = &rxq->rx_oobs[curr_index];
906
907 err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
908 &recv_buf_oob->wqe_inf);
909 if (WARN_ON_ONCE(err))
910 return;
911
912 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
913}
914
915static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
916 struct mana_rxq *rxq)
917{
918 struct mana_stats *rx_stats = &rxq->stats;
919 struct net_device *ndev = rxq->ndev;
920 uint pkt_len = cqe->ppi[0].pkt_len;
921 struct mana_port_context *apc;
922 u16 rxq_idx = rxq->rxq_idx;
923 struct napi_struct *napi;
924 struct gdma_queue *eq;
925 struct sk_buff *skb;
926 u32 hash_value;
927
928 apc = netdev_priv(ndev);
929 eq = apc->eqs[rxq_idx].eq;
930 eq->eq.work_done++;
931 napi = &eq->eq.napi;
932
933 if (!buf_va) {
934 ++ndev->stats.rx_dropped;
935 return;
936 }
937
938 skb = build_skb(buf_va, PAGE_SIZE);
939
940 if (!skb) {
941 free_page((unsigned long)buf_va);
942 ++ndev->stats.rx_dropped;
943 return;
944 }
945
946 skb_put(skb, pkt_len);
947 skb->dev = napi->dev;
948
949 skb->protocol = eth_type_trans(skb, ndev);
950 skb_checksum_none_assert(skb);
951 skb_record_rx_queue(skb, rxq_idx);
952
953 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
954 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
955 skb->ip_summed = CHECKSUM_UNNECESSARY;
956 }
957
958 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
959 hash_value = cqe->ppi[0].pkt_hash;
960
961 if (cqe->rx_hashtype & MANA_HASH_L4)
962 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
963 else
964 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
965 }
966
967 napi_gro_receive(napi, skb);
968
969 u64_stats_update_begin(&rx_stats->syncp);
970 rx_stats->packets++;
971 rx_stats->bytes += pkt_len;
972 u64_stats_update_end(&rx_stats->syncp);
973}
974
975static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
976 struct gdma_comp *cqe)
977{
978 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
979 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
980 struct net_device *ndev = rxq->ndev;
981 struct mana_recv_buf_oob *rxbuf_oob;
982 struct device *dev = gc->dev;
983 void *new_buf, *old_buf;
984 struct page *new_page;
985 u32 curr, pktlen;
986 dma_addr_t da;
987
988 switch (oob->cqe_hdr.cqe_type) {
989 case CQE_RX_OKAY:
990 break;
991
992 case CQE_RX_TRUNCATED:
993 netdev_err(ndev, "Dropped a truncated packet\n");
994 return;
995
996 case CQE_RX_COALESCED_4:
997 netdev_err(ndev, "RX coalescing is unsupported\n");
998 return;
999
1000 case CQE_RX_OBJECT_FENCE:
1001 netdev_err(ndev, "RX Fencing is unsupported\n");
1002 return;
1003
1004 default:
1005 netdev_err(ndev, "Unknown RX CQE type = %d\n",
1006 oob->cqe_hdr.cqe_type);
1007 return;
1008 }
1009
1010 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1011 return;
1012
1013 pktlen = oob->ppi[0].pkt_len;
1014
1015 if (pktlen == 0) {
1016
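		/* A completed RX packet should never have a zero length */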
1017 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1018 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1019 return;
1020 }
1021
1022 curr = rxq->buf_index;
1023 rxbuf_oob = &rxq->rx_oobs[curr];
1024 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1025
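	/* Allocate a replacement page so the current buffer can be handed
	 * up the network stack.
	 */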
1026 new_page = alloc_page(GFP_ATOMIC);
1027
1028 if (new_page) {
1029 da = dma_map_page(dev, new_page, 0, rxq->datasize,
1030 DMA_FROM_DEVICE);
1031
1032 if (dma_mapping_error(dev, da)) {
1033 __free_page(new_page);
1034 new_page = NULL;
1035 }
1036 }
1037
1038 new_buf = new_page ? page_to_virt(new_page) : NULL;
1039
1040 if (new_buf) {
1041 dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1042 DMA_FROM_DEVICE);
1043
1044 old_buf = rxbuf_oob->buf_va;
1045
1046
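		/* Point the receive OOB at the new buffer; the old one is
		 * passed up the stack below.
		 */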
1047 rxbuf_oob->buf_va = new_buf;
1048 rxbuf_oob->buf_dma_addr = da;
1049 rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1050 } else {
1051 old_buf = NULL;
1052 }
1053
1054 mana_rx_skb(old_buf, oob, rxq);
1055
1056 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1057
1058 mana_post_pkt_rxq(rxq);
1059}
1060
1061static void mana_poll_rx_cq(struct mana_cq *cq)
1062{
1063 struct gdma_comp *comp = cq->gdma_comp_buf;
1064 int comp_read, i;
1065
1066 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1067 WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1068
1069 for (i = 0; i < comp_read; i++) {
1070 if (WARN_ON_ONCE(comp[i].is_sq))
1071 return;
1072
1073
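		/* Make sure the completion belongs to this RX queue */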
1074 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1075 return;
1076
1077 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1078 }
1079}
1080
1081static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1082{
1083 struct mana_cq *cq = context;
1084
1085 WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1086
1087 if (cq->type == MANA_CQ_TYPE_RX)
1088 mana_poll_rx_cq(cq);
1089 else
1090 mana_poll_tx_cq(cq);
1091
1092 mana_gd_arm_cq(gdma_queue);
1093}
1094
1095static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1096{
1097 struct gdma_dev *gd = apc->ac->gdma_dev;
1098
1099 if (!cq->gdma_cq)
1100 return;
1101
1102 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1103}
1104
1105static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1106{
1107 struct gdma_dev *gd = apc->ac->gdma_dev;
1108
1109 if (!txq->gdma_sq)
1110 return;
1111
1112 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1113}
1114
1115static void mana_destroy_txq(struct mana_port_context *apc)
1116{
1117 int i;
1118
1119 if (!apc->tx_qp)
1120 return;
1121
1122 for (i = 0; i < apc->num_queues; i++) {
1123 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1124
1125 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1126
1127 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1128 }
1129
1130 kfree(apc->tx_qp);
1131 apc->tx_qp = NULL;
1132}
1133
1134static int mana_create_txq(struct mana_port_context *apc,
1135 struct net_device *net)
1136{
1137 struct gdma_dev *gd = apc->ac->gdma_dev;
1138 struct mana_obj_spec wq_spec;
1139 struct mana_obj_spec cq_spec;
1140 struct gdma_queue_spec spec;
1141 struct gdma_context *gc;
1142 struct mana_txq *txq;
1143 struct mana_cq *cq;
1144 u32 txq_size;
1145 u32 cq_size;
1146 int err;
1147 int i;
1148
1149 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1150 GFP_KERNEL);
1151 if (!apc->tx_qp)
1152 return -ENOMEM;
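
	/* The SQ is sized for MAX_SEND_BUFFERS_PER_QUEUE work requests of
	 * the minimum (32-byte) WQE size; the CQ below is sized to match.
	 */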
1159 txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1160 BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1161
1162 cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1163 cq_size = PAGE_ALIGN(cq_size);
1164
1165 gc = gd->gdma_context;
1166
1167 for (i = 0; i < apc->num_queues; i++) {
1168 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1169
1170
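		/* Create SQ */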
1171 txq = &apc->tx_qp[i].txq;
1172
1173 u64_stats_init(&txq->stats.syncp);
1174 txq->ndev = net;
1175 txq->net_txq = netdev_get_tx_queue(net, i);
1176 txq->vp_offset = apc->tx_vp_offset;
1177 skb_queue_head_init(&txq->pending_skbs);
1178
1179 memset(&spec, 0, sizeof(spec));
1180 spec.type = GDMA_SQ;
1181 spec.monitor_avl_buf = true;
1182 spec.queue_size = txq_size;
1183 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1184 if (err)
1185 goto out;
1186
1187
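		/* Create SQ's CQ */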
1188 cq = &apc->tx_qp[i].tx_cq;
1189 cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
1190 cq->type = MANA_CQ_TYPE_TX;
1191
1192 cq->txq = txq;
1193
1194 memset(&spec, 0, sizeof(spec));
1195 spec.type = GDMA_CQ;
1196 spec.monitor_avl_buf = false;
1197 spec.queue_size = cq_size;
1198 spec.cq.callback = mana_cq_handler;
1199 spec.cq.parent_eq = apc->eqs[i].eq;
1200 spec.cq.context = cq;
1201 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1202 if (err)
1203 goto out;
1204
1205 memset(&wq_spec, 0, sizeof(wq_spec));
1206 memset(&cq_spec, 0, sizeof(cq_spec));
1207
1208 wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1209 wq_spec.queue_size = txq->gdma_sq->queue_size;
1210
1211 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1212 cq_spec.queue_size = cq->gdma_cq->queue_size;
1213 cq_spec.modr_ctx_id = 0;
1214 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1215
1216 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1217 &wq_spec, &cq_spec,
1218 &apc->tx_qp[i].tx_object);
1219
1220 if (err)
1221 goto out;
1222
1223 txq->gdma_sq->id = wq_spec.queue_index;
1224 cq->gdma_cq->id = cq_spec.queue_index;
1225
1226 txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1227 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1228
1229 txq->gdma_txq_id = txq->gdma_sq->id;
1230
1231 cq->gdma_id = cq->gdma_cq->id;
1232
		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
			err = -EINVAL;
			goto out;
		}
1235
1236 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1237
1238 mana_gd_arm_cq(cq->gdma_cq);
1239 }
1240
1241 return 0;
1242out:
1243 mana_destroy_txq(apc);
1244 return err;
1245}
1246
1247static void mana_napi_sync_for_rx(struct mana_rxq *rxq)
1248{
1249 struct net_device *ndev = rxq->ndev;
1250 struct mana_port_context *apc;
1251 u16 rxq_idx = rxq->rxq_idx;
1252 struct napi_struct *napi;
1253 struct gdma_queue *eq;
1254
1255 apc = netdev_priv(ndev);
1256 eq = apc->eqs[rxq_idx].eq;
1257 napi = &eq->eq.napi;
1258
1259 napi_synchronize(napi);
1260}
1261
1262static void mana_destroy_rxq(struct mana_port_context *apc,
1263 struct mana_rxq *rxq, bool validate_state)
1264
1265{
1266 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1267 struct mana_recv_buf_oob *rx_oob;
1268 struct device *dev = gc->dev;
1269 int i;
1270
1271 if (!rxq)
1272 return;
1273
1274 if (validate_state)
1275 mana_napi_sync_for_rx(rxq);
1276
1277 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1278
1279 mana_deinit_cq(apc, &rxq->rx_cq);
1280
1281 for (i = 0; i < rxq->num_rx_buf; i++) {
1282 rx_oob = &rxq->rx_oobs[i];
1283
1284 if (!rx_oob->buf_va)
1285 continue;
1286
1287 dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1288 DMA_FROM_DEVICE);
1289
1290 free_page((unsigned long)rx_oob->buf_va);
1291 rx_oob->buf_va = NULL;
1292 }
1293
1294 if (rxq->gdma_rq)
1295 mana_gd_destroy_queue(gc, rxq->gdma_rq);
1296
1297 kfree(rxq);
1298}
1299
1300#define MANA_WQE_HEADER_SIZE 16
1301#define MANA_WQE_SGE_SIZE 16
1302
1303static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1304 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1305{
1306 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1307 struct mana_recv_buf_oob *rx_oob;
1308 struct device *dev = gc->dev;
1309 struct page *page;
1310 dma_addr_t da;
1311 u32 buf_idx;
1312
1313 WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1314
1315 *rxq_size = 0;
1316 *cq_size = 0;
1317
1318 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1319 rx_oob = &rxq->rx_oobs[buf_idx];
1320 memset(rx_oob, 0, sizeof(*rx_oob));
1321
1322 page = alloc_page(GFP_KERNEL);
1323 if (!page)
1324 return -ENOMEM;
1325
1326 da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
1327
1328 if (dma_mapping_error(dev, da)) {
1329 __free_page(page);
1330 return -ENOMEM;
1331 }
1332
1333 rx_oob->buf_va = page_to_virt(page);
1334 rx_oob->buf_dma_addr = da;
1335
1336 rx_oob->num_sge = 1;
1337 rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1338 rx_oob->sgl[0].size = rxq->datasize;
1339 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1340
1341 rx_oob->wqe_req.sgl = rx_oob->sgl;
1342 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1343 rx_oob->wqe_req.inline_oob_size = 0;
1344 rx_oob->wqe_req.inline_oob_data = NULL;
1345 rx_oob->wqe_req.flags = 0;
1346 rx_oob->wqe_req.client_data_unit = 0;
1347
1348 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1349 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1350 *cq_size += COMP_ENTRY_SIZE;
1351 }
1352
1353 return 0;
1354}
1355
1356static int mana_push_wqe(struct mana_rxq *rxq)
1357{
1358 struct mana_recv_buf_oob *rx_oob;
1359 u32 buf_idx;
1360 int err;
1361
1362 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1363 rx_oob = &rxq->rx_oobs[buf_idx];
1364
1365 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1366 &rx_oob->wqe_inf);
1367 if (err)
1368 return -ENOSPC;
1369 }
1370
1371 return 0;
1372}
1373
1374static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1375 u32 rxq_idx, struct mana_eq *eq,
1376 struct net_device *ndev)
1377{
1378 struct gdma_dev *gd = apc->ac->gdma_dev;
1379 struct mana_obj_spec wq_spec;
1380 struct mana_obj_spec cq_spec;
1381 struct gdma_queue_spec spec;
1382 struct mana_cq *cq = NULL;
1383 struct gdma_context *gc;
1384 u32 cq_size, rq_size;
1385 struct mana_rxq *rxq;
1386 int err;
1387
1388 gc = gd->gdma_context;
1389
1390 rxq = kzalloc(sizeof(*rxq) +
1391 RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
1392 GFP_KERNEL);
1393 if (!rxq)
1394 return NULL;
1395
1396 rxq->ndev = ndev;
1397 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1398 rxq->rxq_idx = rxq_idx;
1399 rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1400 rxq->rxobj = INVALID_MANA_HANDLE;
1401
1402 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1403 if (err)
1404 goto out;
1405
1406 rq_size = PAGE_ALIGN(rq_size);
1407 cq_size = PAGE_ALIGN(cq_size);
1408
1409
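	/* Create RQ */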
1410 memset(&spec, 0, sizeof(spec));
1411 spec.type = GDMA_RQ;
1412 spec.monitor_avl_buf = true;
1413 spec.queue_size = rq_size;
1414 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1415 if (err)
1416 goto out;
1417
1418
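	/* Create RQ's CQ */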
1419 cq = &rxq->rx_cq;
1420 cq->gdma_comp_buf = eq->cqe_poll;
1421 cq->type = MANA_CQ_TYPE_RX;
1422 cq->rxq = rxq;
1423
1424 memset(&spec, 0, sizeof(spec));
1425 spec.type = GDMA_CQ;
1426 spec.monitor_avl_buf = false;
1427 spec.queue_size = cq_size;
1428 spec.cq.callback = mana_cq_handler;
1429 spec.cq.parent_eq = eq->eq;
1430 spec.cq.context = cq;
1431 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1432 if (err)
1433 goto out;
1434
1435 memset(&wq_spec, 0, sizeof(wq_spec));
1436 memset(&cq_spec, 0, sizeof(cq_spec));
1437 wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1438 wq_spec.queue_size = rxq->gdma_rq->queue_size;
1439
1440 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1441 cq_spec.queue_size = cq->gdma_cq->queue_size;
1442 cq_spec.modr_ctx_id = 0;
1443 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1444
1445 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1446 &wq_spec, &cq_spec, &rxq->rxobj);
1447 if (err)
1448 goto out;
1449
1450 rxq->gdma_rq->id = wq_spec.queue_index;
1451 cq->gdma_cq->id = cq_spec.queue_index;
1452
1453 rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1454 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1455
1456 rxq->gdma_id = rxq->gdma_rq->id;
1457 cq->gdma_id = cq->gdma_cq->id;
1458
1459 err = mana_push_wqe(rxq);
1460 if (err)
1461 goto out;
1462
	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
		err = -EINVAL;
		goto out;
	}
1465
1466 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1467
1468 mana_gd_arm_cq(cq->gdma_cq);
1469out:
1470 if (!err)
1471 return rxq;
1472
1473 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1474
1475 mana_destroy_rxq(apc, rxq, false);
1476
1477 if (cq)
1478 mana_deinit_cq(apc, cq);
1479
1480 return NULL;
1481}
1482
1483static int mana_add_rx_queues(struct mana_port_context *apc,
1484 struct net_device *ndev)
1485{
1486 struct mana_rxq *rxq;
1487 int err = 0;
1488 int i;
1489
1490 for (i = 0; i < apc->num_queues; i++) {
1491 rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
1492 if (!rxq) {
1493 err = -ENOMEM;
1494 goto out;
1495 }
1496
1497 u64_stats_init(&rxq->stats.syncp);
1498
1499 apc->rxqs[i] = rxq;
1500 }
1501
1502 apc->default_rxobj = apc->rxqs[0]->rxobj;
1503out:
1504 return err;
1505}
1506
1507static void mana_destroy_vport(struct mana_port_context *apc)
1508{
1509 struct mana_rxq *rxq;
1510 u32 rxq_idx;
1511
1512 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1513 rxq = apc->rxqs[rxq_idx];
1514 if (!rxq)
1515 continue;
1516
1517 mana_destroy_rxq(apc, rxq, true);
1518 apc->rxqs[rxq_idx] = NULL;
1519 }
1520
1521 mana_destroy_txq(apc);
1522}
1523
1524static int mana_create_vport(struct mana_port_context *apc,
1525 struct net_device *net)
1526{
1527 struct gdma_dev *gd = apc->ac->gdma_dev;
1528 int err;
1529
1530 apc->default_rxobj = INVALID_MANA_HANDLE;
1531
1532 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1533 if (err)
1534 return err;
1535
1536 return mana_create_txq(apc, net);
1537}
1538
1539static void mana_rss_table_init(struct mana_port_context *apc)
1540{
1541 int i;
1542
1543 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1544 apc->indir_table[i] =
1545 ethtool_rxfh_indir_default(i, apc->num_queues);
1546}
1547
1548int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1549 bool update_hash, bool update_tab)
1550{
1551 u32 queue_idx;
1552 int i;
1553
1554 if (update_tab) {
1555 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1556 queue_idx = apc->indir_table[i];
1557 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1558 }
1559 }
1560
1561 return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1562}
1563
1564static int mana_init_port(struct net_device *ndev)
1565{
1566 struct mana_port_context *apc = netdev_priv(ndev);
1567 u32 max_txq, max_rxq, max_queues;
1568 int port_idx = apc->port_idx;
1569 u32 num_indirect_entries;
1570 int err;
1571
1572 err = mana_init_port_context(apc);
1573 if (err)
1574 return err;
1575
1576 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1577 &num_indirect_entries);
1578 if (err) {
		netdev_err(ndev, "Failed to query info for vPort %d\n",
			   port_idx);
1580 goto reset_apc;
1581 }
1582
1583 max_queues = min_t(u32, max_txq, max_rxq);
1584 if (apc->max_queues > max_queues)
1585 apc->max_queues = max_queues;
1586
1587 if (apc->num_queues > apc->max_queues)
1588 apc->num_queues = apc->max_queues;
1589
1590 ether_addr_copy(ndev->dev_addr, apc->mac_addr);
1591
1592 return 0;
1593
1594reset_apc:
1595 kfree(apc->rxqs);
1596 apc->rxqs = NULL;
1597 return err;
1598}
1599
1600int mana_alloc_queues(struct net_device *ndev)
1601{
1602 struct mana_port_context *apc = netdev_priv(ndev);
1603 struct gdma_dev *gd = apc->ac->gdma_dev;
1604 int err;
1605
1606 err = mana_create_eq(apc);
1607 if (err)
1608 return err;
1609
1610 err = mana_create_vport(apc, ndev);
1611 if (err)
1612 goto destroy_eq;
1613
1614 err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1615 if (err)
1616 goto destroy_vport;
1617
1618 err = mana_add_rx_queues(apc, ndev);
1619 if (err)
1620 goto destroy_vport;
1621
1622 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1623
1624 err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1625 if (err)
1626 goto destroy_vport;
1627
1628 mana_rss_table_init(apc);
1629
1630 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1631 if (err)
1632 goto destroy_vport;
1633
1634 return 0;
1635
1636destroy_vport:
1637 mana_destroy_vport(apc);
1638destroy_eq:
1639 mana_destroy_eq(gd->gdma_context, apc);
1640 return err;
1641}
1642
1643int mana_attach(struct net_device *ndev)
1644{
1645 struct mana_port_context *apc = netdev_priv(ndev);
1646 int err;
1647
1648 ASSERT_RTNL();
1649
1650 err = mana_init_port(ndev);
1651 if (err)
1652 return err;
1653
1654 err = mana_alloc_queues(ndev);
1655 if (err) {
1656 kfree(apc->rxqs);
1657 apc->rxqs = NULL;
1658 return err;
1659 }
1660
1661 netif_device_attach(ndev);
1662
1663 apc->port_is_up = apc->port_st_save;
1664
1665
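	/* Ensure port_is_up is updated before the TX queues are started */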
1666 smp_wmb();
1667
1668 if (apc->port_is_up) {
1669 netif_carrier_on(ndev);
1670 netif_tx_wake_all_queues(ndev);
1671 }
1672
1673 return 0;
1674}
1675
1676static int mana_dealloc_queues(struct net_device *ndev)
1677{
1678 struct mana_port_context *apc = netdev_priv(ndev);
1679 struct mana_txq *txq;
1680 int i, err;
1681
1682 if (apc->port_is_up)
1683 return -EINVAL;
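
	/* No new packets can be transmitted now: apc->port_is_up is false,
	 * so mana_start_xmit() drops anything it is handed.  Wait for the
	 * completion path to drain all in-flight TX packets.
	 */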
1693 for (i = 0; i < apc->num_queues; i++) {
1694 txq = &apc->tx_qp[i].txq;
1695
1696 while (atomic_read(&txq->pending_sends) > 0)
1697 usleep_range(1000, 2000);
1698 }
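
	/* The port is down and all pending sends have drained, so the TX
	 * queues can no longer be re-enabled by mana_poll_tx_cq().
	 */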
1704 apc->rss_state = TRI_STATE_FALSE;
1705 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
1706 if (err) {
1707 netdev_err(ndev, "Failed to disable vPort: %d\n", err);
1708 return err;
1709 }
1710
1711
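	/* RX fencing is not implemented; give in-flight RX work a moment
	 * to finish before tearing down the queues.
	 */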
1712 ssleep(1);
1713
1714 mana_destroy_vport(apc);
1715
1716 mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
1717
1718 return 0;
1719}
1720
1721int mana_detach(struct net_device *ndev, bool from_close)
1722{
1723 struct mana_port_context *apc = netdev_priv(ndev);
1724 int err;
1725
1726 ASSERT_RTNL();
1727
1728 apc->port_st_save = apc->port_is_up;
1729 apc->port_is_up = false;
1730
1731
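	/* Ensure port_is_up is updated before the TX queue state changes */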
1732 smp_wmb();
1733
1734 netif_tx_disable(ndev);
1735 netif_carrier_off(ndev);
1736
1737 if (apc->port_st_save) {
1738 err = mana_dealloc_queues(ndev);
1739 if (err)
1740 return err;
1741 }
1742
1743 if (!from_close) {
1744 netif_device_detach(ndev);
1745 mana_cleanup_port_context(apc);
1746 }
1747
1748 return 0;
1749}
1750
1751static int mana_probe_port(struct mana_context *ac, int port_idx,
1752 struct net_device **ndev_storage)
1753{
1754 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1755 struct mana_port_context *apc;
1756 struct net_device *ndev;
1757 int err;
1758
1759 ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
1760 gc->max_num_queues);
1761 if (!ndev)
1762 return -ENOMEM;
1763
1764 *ndev_storage = ndev;
1765
1766 apc = netdev_priv(ndev);
1767 apc->ac = ac;
1768 apc->ndev = ndev;
1769 apc->max_queues = gc->max_num_queues;
1770 apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
1771 apc->port_handle = INVALID_MANA_HANDLE;
1772 apc->port_idx = port_idx;
1773
1774 ndev->netdev_ops = &mana_devops;
1775 ndev->ethtool_ops = &mana_ethtool_ops;
1776 ndev->mtu = ETH_DATA_LEN;
1777 ndev->max_mtu = ndev->mtu;
1778 ndev->min_mtu = ndev->mtu;
1779 ndev->needed_headroom = MANA_HEADROOM;
1780 SET_NETDEV_DEV(ndev, gc->dev);
1781
1782 netif_carrier_off(ndev);
1783
1784 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
1785
1786 err = mana_init_port(ndev);
1787 if (err)
1788 goto free_net;
1789
1790 netdev_lockdep_set_classes(ndev);
1791
1792 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1793 ndev->hw_features |= NETIF_F_RXCSUM;
1794 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1795 ndev->hw_features |= NETIF_F_RXHASH;
1796 ndev->features = ndev->hw_features;
1797 ndev->vlan_features = 0;
1798
1799 err = register_netdev(ndev);
1800 if (err) {
1801 netdev_err(ndev, "Unable to register netdev.\n");
1802 goto reset_apc;
1803 }
1804
1805 return 0;
1806
1807reset_apc:
1808 kfree(apc->rxqs);
1809 apc->rxqs = NULL;
1810free_net:
1811 *ndev_storage = NULL;
1812 netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
1813 free_netdev(ndev);
1814 return err;
1815}
1816
1817int mana_probe(struct gdma_dev *gd)
1818{
1819 struct gdma_context *gc = gd->gdma_context;
1820 struct device *dev = gc->dev;
1821 struct mana_context *ac;
1822 int err;
1823 int i;
1824
1825 dev_info(dev,
1826 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
1827 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
1828
1829 err = mana_gd_register_device(gd);
1830 if (err)
1831 return err;
1832
1833 ac = kzalloc(sizeof(*ac), GFP_KERNEL);
1834 if (!ac)
1835 return -ENOMEM;
1836
1837 ac->gdma_dev = gd;
1838 ac->num_ports = 1;
1839 gd->driver_data = ac;
1840
1841 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
1842 MANA_MICRO_VERSION, &ac->num_ports);
1843 if (err)
1844 goto out;
1845
1846 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
1847 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
1848
1849 for (i = 0; i < ac->num_ports; i++) {
1850 err = mana_probe_port(ac, i, &ac->ports[i]);
1851 if (err)
1852 break;
1853 }
1854out:
1855 if (err)
1856 mana_remove(gd);
1857
1858 return err;
1859}
1860
1861void mana_remove(struct gdma_dev *gd)
1862{
1863 struct gdma_context *gc = gd->gdma_context;
1864 struct mana_context *ac = gd->driver_data;
1865 struct device *dev = gc->dev;
1866 struct net_device *ndev;
1867 int i;
1868
1869 for (i = 0; i < ac->num_ports; i++) {
1870 ndev = ac->ports[i];
1871 if (!ndev) {
1872 if (i == 0)
1873 dev_err(dev, "No net device to remove\n");
1874 goto out;
1875 }
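
		/* All cleanup actions are done under rtnl_lock() so that
		 * they cannot race with the netdev callbacks such as
		 * mana_open() and mana_close().
		 */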
1880 rtnl_lock();
1881
1882 mana_detach(ndev, false);
1883
1884 unregister_netdevice(ndev);
1885
1886 rtnl_unlock();
1887
1888 free_netdev(ndev);
1889 }
1890out:
1891 mana_gd_deregister_device(gd);
1892 gd->driver_data = NULL;
1893 gd->gdma_context = NULL;
1894 kfree(ac);
1895}
1896