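/* Ring mode helpers for the stmmac Ethernet driver. In this DMA mode
 * each descriptor carries two buffer pointers (des2 and des3); the
 * functions below use them to transmit jumbo frames and to initialize,
 * refill and clean descriptors accordingly.
 */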
#include "stmmac.h"

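/* Map the linear part of a jumbo skb onto one or two TX descriptors,
 * splitting it at bmax (8KiB with enhanced descriptors, 2KiB otherwise)
 * and using des3 as the second buffer pointer of each descriptor.
 * Returns the last ring entry used, or -1 on a DMA mapping error.
 */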
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, len, des2;
	struct dma_desc *desc;

	if (priv->extend_desc)
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	/* Enhanced descriptors can carry up to 8KiB per buffer pointer,
	 * normal ones only 2KiB.
	 */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	len = nopaged_len - bmax;

	if (nopaged_len > BUF_SIZE_8KiB) {
		des2 = dma_map_single(priv->device, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = bmax;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
				       STMMAC_RING_MODE, 0, false, skb->len);
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (priv->extend_desc)
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des2 = dma_map_single(priv->device, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		/* Only mark this as the last segment if the skb carries no
		 * paged fragments that still have to be sent.
		 */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
				       STMMAC_RING_MODE, 1,
				       !skb_is_nonlinear(skb), skb->len);
	} else {
		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
				       STMMAC_RING_MODE, 0,
				       !skb_is_nonlinear(skb), skb->len);
	}

	tx_q->cur_tx = entry;

	return entry;
}

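/* Frames whose linear part does not fit into a single 4KiB buffer
 * must be handled by the jumbo path above.
 */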
static unsigned int is_jumbo_frm(int len, int enh_desc)
{
	unsigned int ret = 0;

	if (len >= BUF_SIZE_4KiB)
		ret = 1;

	return ret;
}

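/* Called on the RX refill path; priv_ptr is the RX queue whose
 * descriptor is being re-armed.
 */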
static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
	struct stmmac_rx_queue *rx_q = priv_ptr;
	struct stmmac_priv *priv = rx_q->priv_data;

	/* Fill DES3 in case of RING mode */
	if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
		p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}

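/* In ring mode des3 must be filled as well, because the hardware
 * uses it as a second buffer.
 */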
static void init_desc3(struct dma_desc *p)
{
	p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}

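/* Called on the TX reclaim path to clear des3 once the descriptor
 * at dirty_tx has been released.
 */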
static void clean_desc3(void *priv_ptr, struct dma_desc *p)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->dirty_tx;

	/* des3 is only used by jumbo frames tx or time stamping */
	if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
		     (tx_q->tx_skbuff_dma[entry].last_segment &&
		      !priv->extend_desc && priv->hwts_tx_en)))
		p->des3 = 0;
}

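/* Select a 16KiB DMA buffer size when the MTU does not fit in an
 * 8KiB buffer; returning 0 lets the caller keep its default size.
 */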
static int set_16kib_bfsize(int mtu)
{
	int ret = 0;

	if (unlikely(mtu >= BUF_SIZE_8KiB))
		ret = BUF_SIZE_16KiB;

	return ret;
}

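/* Ring mode callbacks wired into the core driver. */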
const struct stmmac_mode_ops ring_mode_ops = {
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.init_desc3 = init_desc3,
	.clean_desc3 = clean_desc3,
	.set_16kib_bfsize = set_16kib_bfsize,
};