#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	DBGPR("-->xgbe_init_ring\n");

	if (!ring)
		return 0;

	/* Descriptors */
	ring->rdesc_count = rdesc_count;
	ring->rdesc = dma_alloc_coherent(pdata->dev,
					 (sizeof(struct xgbe_ring_desc) *
					  rdesc_count), &ring->rdesc_dma,
					 GFP_KERNEL);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
			      GFP_KERNEL);
	if (!ring->rdata)
		return -ENOMEM;

	DBGPR("  rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
	      ring->rdesc, ring->rdesc_dma, ring->rdata);

	DBGPR("<--xgbe_init_ring\n");

	return 0;
}

static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	DBGPR("-->xgbe_alloc_ring_resources\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		DBGPR("  %s - tx_ring:\n", channel->name);
		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		DBGPR("  %s - rx_ring:\n", channel->name);
		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	DBGPR("<--xgbe_alloc_ring_resources\n");

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

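/* Allocate a set of pages for an Rx buffer pool.  Start at the requested
 * allocation order and fall back to smaller orders if allocation fails,
 * then map the whole allocation for DMA from the device.
 */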
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	int ret;

	/* Try to obtain pages, decreasing order if necessary */
	gfp |= __GFP_COLD | __GFP_COMP;
	while (order >= 0) {
		pages = alloc_pages(gfp, order);
		if (pages)
			break;

		order--;
	}
	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	ret = dma_mapping_error(pdata->dev, pages_dma);
	if (ret) {
		put_page(pages);
		return ret;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

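/* Carve the next buffer of @len bytes out of the current page allocation,
 * taking a page reference for the buffer.  When the allocation cannot
 * satisfy another request of this size, hand responsibility for the final
 * unmap to this buffer and clear the allocation so a fresh one is obtained
 * on the next call.
 */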
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma = pa->pages_dma + pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

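/* Attach Rx buffers to a descriptor: a small buffer for the packet header
 * and a larger one for the packet data, each sliced from a shared page
 * allocation that is replenished here on demand.
 */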
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int order, ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
				       order);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

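/* Point each ring data entry at its hardware descriptor and let the
 * hardware interface initialize the Tx descriptors for each channel.
 */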
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->rx, 0, sizeof(ring->rx));

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

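/* Release everything attached to a ring data entry: Tx DMA mappings, the
 * socket buffer, Rx page references and any deferred page unmaps, then
 * clear the saved Rx packet state.
 */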
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.incomplete = 0;
		rdata->state.context_next = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

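/* Map a Tx skb for DMA, one descriptor per mapped piece: a slot is
 * reserved for a context descriptor when the MSS or VLAN tag changes, the
 * TSO header is mapped separately, and the linear data and each fragment
 * are mapped in chunks of at most XGBE_TX_MAX_BUF_SIZE bytes.  Returns the
 * number of descriptors used, or 0 after unwinding all mappings on error.
 */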
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		DBGPR("  TSO packet\n");

		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
		      cur_index, skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		DBGPR("  mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			DBGPR("  skb frag: index=%u, dma=0x%llx, len=%u\n",
			      cur_index, skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

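/* Re-arm descriptors whose buffers have been consumed: starting at the
 * ring's realloc index, unmap the old state, attach fresh Rx buffers and
 * reset each hardware descriptor.
 */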
static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	int i;

	DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
	      ring->rx.realloc_index);

	for (i = 0; i < ring->dirty; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);

		/* Reset rdata values */
		xgbe_unmap_rdata(pdata, rdata);

		if (xgbe_map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(rdata);

		ring->rx.realloc_index++;
	}
	ring->dirty = 0;

	DBGPR("<--xgbe_realloc_rx_buffer\n");
}

void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}