#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

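/* Release everything associated with a ring: per-descriptor data (via
 * xgbe_unmap_rdata), any partially used Rx header/buffer page allocations,
 * and the coherent descriptor memory itself.
 */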
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	/* Temporarily point the device at the preferred node for the
	 * allocation, then retry without a node preference if that fails.
	 */
	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

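/* Allocate a (possibly compound) page group for Rx use.  The allocation
 * order is reduced until the allocation succeeds; if node-local memory
 * cannot be obtained, the allocation is retried without a node preference
 * before the pages are DMA mapped.
 */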
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order, ret;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try again without a node preference */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages for DMA */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	ret = dma_mapping_error(pdata->dev, pages_dma);
	if (ret) {
		put_page(pages);
		return ret;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

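/* Carve a buffer of 'len' bytes out of the current page allocation and
 * record it in the buffer descriptor.  When the allocation cannot satisfy
 * another buffer of the same size, ownership of the unmap is handed to this
 * descriptor and the allocator state is cleared so fresh pages are obtained
 * on the next call.
 */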
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This descriptor is responsible for unmapping the pages */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

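/* Ensure the ring has header and buffer page allocations available, then
 * assign a header buffer and a data buffer to the given descriptor data.
 */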
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

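/* Undo everything set up for a single descriptor entry: Tx/Rx DMA mappings,
 * the skb reference, Rx page references, and any saved receive state.
 */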
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

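/* Map an skb for transmission.  The TSO header, the linear data and each
 * page fragment are mapped into consecutive descriptor entries (splitting
 * at XGBE_TX_MAX_BUF_SIZE), with room reserved for a context descriptor
 * when the MSS or VLAN tag changes.  Returns the number of descriptor
 * entries used, or 0 on a mapping failure after unwinding any mappings.
 */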
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if the MSS or VLAN tag
	 * differs from the current ring state.
	 */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) linear packet data */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry.  Some data has always been
	 * mapped at this point, so cur_index has been advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}