#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

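/* Release everything held by a single ring: unmap and free the per-descriptor
 * data, drop the shared Rx page allocations and free the coherent descriptor
 * memory.
 */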
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

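/* Free the Tx and Rx rings of every channel */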
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

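/* Allocate zeroed memory on the requested NUMA node, falling back to an
 * allocation from any node if the node-local attempt fails.
 */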
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

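/* Allocate coherent DMA memory, preferring the requested NUMA node by
 * temporarily overriding the device node and restoring it afterwards.
 */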
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

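/* Allocate the hardware descriptor memory and the per-descriptor tracking
 * data for a single ring.
 */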
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

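/* Allocate the Tx and Rx descriptor rings for every channel */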
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

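/* Allocate and DMA map a page (or group of pages) that Rx descriptors will
 * carve their header and data buffers out of.
 */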
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

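/* Hand out the next chunk of a page allocation to a buffer descriptor and
 * advance the allocation offset; the last descriptor to use the pages takes
 * over responsibility for unmapping them.
 */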
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

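/* Attach header and data buffer space to an Rx descriptor, replenishing the
 * shared page allocations when they have been used up.
 */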
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

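/* Point each Tx descriptor data entry at its hardware descriptor, reset the
 * ring state and let the hardware interface program the Tx ring.
 */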
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

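/* Point each Rx descriptor data entry at its hardware descriptor, attach
 * receive buffers and let the hardware interface program the Rx ring.
 */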
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

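/* Undo any DMA mappings and release any skb or page references held by a
 * single descriptor data entry, then reset its state.
 */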
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

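/* DMA map an skb for transmit, spreading the TSO header, linear data and
 * page fragments across as many descriptors as required. Returns the number
 * of descriptors used, or 0 on mapping failure.
 */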
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	skb_frag_t *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

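/* Populate the descriptor interface function pointers */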
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}