// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the normal descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

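/* Post-transmission cleanup path: read back TDES0/TDES1 once, bail out
 * early while the DMA still owns the descriptor or until the last
 * segment closes, then fold the error bits into the extra stats and the
 * generic netdev counters.
 */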
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
			       struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	unsigned int tdes1 = le32_to_cpu(p->des1);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & TDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			stats->tx_fifo_errors++;
		}
		if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
			     (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
			     (tdes0 & TDES0_LATE_COLLISION))) {
			unsigned int collisions;

			/* Collision count sits in TDES0 bits [6:3] */
			collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
			stats->collisions += collisions;
		}
		ret = tx_err;
	}

	if (tdes0 & TDES0_VLAN_FRAME)
		x->tx_vlan++;

	if (unlikely(tdes0 & TDES0_DEFERRED))
		x->tx_deferred++;

	return ret;
}

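/* Buffer 1 size occupies des1 bits [10:0] in both the RX and TX normal
 * descriptor layouts, so TDES1_BUFFER1_SIZE_MASK and
 * RDES1_BUFFER1_SIZE_MASK select the same field.
 */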
static int ndesc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & TDES1_BUFFER1_SIZE_MASK);
}

/* This function verifies if each incoming frame has some errors
 * and, if required, updates the multicast statistics.
 * In case of success, it returns good_frame because the GMAC device
 * is supposed to be able to compute the csum in HW.
 */
static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
			       struct dma_desc *p)
{
	int ret = good_frame;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
			x->rx_desc++;
		if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
			x->sa_filter_fail++;
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->overflow_error++;
		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			x->ipc_csum_error++;
		if (unlikely(rdes0 & RDES0_COLLISION)) {
			x->rx_collision++;
			stats->collisions++;
		}
		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_MII_ERROR)) {
		x->rx_mii++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->vlan_tag++;
#endif
	return ret;
}

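/* Arm an RX descriptor for the DMA: set OWN, program the buffer 1 size
 * (capped at BUF_SIZE_2KiB - 1, the widest value the 11-bit field can
 * carry), record the ring/chain layout and optionally mask the RX
 * completion interrupt for coalescing.
 */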
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
			       int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ndesc_rx_set_on_chain(p, end);
	else
		ndesc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
}

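/* A fresh TX descriptor stays host-owned (OWN cleared) until a frame is
 * actually queued; only the ring/chain layout bits are prepared here.
 */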
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~TDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		ndesc_tx_set_on_chain(p);
	else
		ndesc_end_tx_desc_on_ring(p, end);
}

static int ndesc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
}

static void ndesc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(TDES0_OWN);
}

static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int ndesc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
}

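/* Recycle a completed TX descriptor: the end-of-ring bit is sampled
 * from des1 before the status words are wiped so it can be re-applied
 * afterwards.
 */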
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		ndesc_tx_set_on_chain(p);
	else
		ndesc_end_tx_desc_on_ring(p, ter);
}

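/* Fill des1 for one segment of a frame: first/last segment markers,
 * checksum insertion control and the buffer length; OWN is flipped only
 * at the end so the DMA never sees a half-built descriptor.
 */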
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				  bool csum_flag, int mode, bool tx_own,
				  bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes1 = le32_to_cpu(p->des1);

	if (is_fs)
		tdes1 |= TDES1_FIRST_SEGMENT;
	else
		tdes1 &= ~TDES1_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
	else
		tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes1 |= TDES1_LAST_SEGMENT;

	p->des1 = cpu_to_le32(tdes1);

	if (mode == STMMAC_CHAIN_MODE)
		norm_set_tx_desc_len_on_chain(p, len);
	else
		norm_set_tx_desc_len_on_ring(p, len);

	if (tx_own)
		p->des0 |= cpu_to_le32(TDES0_OWN);
}

static void ndesc_set_tx_ic(struct dma_desc *p)
{
	p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
}

static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;

	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
		 >> RDES0_FRAME_LEN_SHIFT) -
		csum);
}

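/* PTP support: on normal descriptors the hardware writes the timestamp
 * snapshot back over the buffer pointers, des2 carrying nanoseconds and
 * des3 the seconds count.
 */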
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
}

static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
}

static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns;

	ns = le32_to_cpu(p->des2);
	/* convert high/sec time stamp value to nanosecond */
	ns += le32_to_cpu(p->des3) * 1000000000ULL;

	*ts = ns;
}

static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;

	if ((le32_to_cpu(p->des2) == 0xffffffff) &&
	    (le32_to_cpu(p->des3) == 0xffffffff))
		/* timestamp is corrupted, hence don't store it */
		return 0;
	else
		return 1;
}

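/* Debug helper: dump every descriptor in the ring together with its DMA
 * address; des0/des1 are fetched through a single 64-bit load.
 */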
static void ndesc_display_ring(void *head, unsigned int size, bool rx,
			       dma_addr_t dma_rx_phy, unsigned int desc_size)
{
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;
	int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		dma_addr = dma_rx_phy + i * sizeof(*p);
		x = *(u64 *)p;
		pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x",
			i, &dma_addr,
			(unsigned int)x, (unsigned int)(x >> 32),
			p->des2, p->des3);
		p++;
	}
	pr_info("\n");
}

static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
{
	*addr = le32_to_cpu(p->des2);
}

static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des2 = cpu_to_le32(addr);
}

static void ndesc_clear(struct dma_desc *p)
{
	p->des2 = 0;
}

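/* Descriptor callbacks wired into the stmmac core for MACs that use the
 * normal (non-enhanced) descriptor format.
 */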
const struct stmmac_desc_ops ndesc_ops = {
	.tx_status = ndesc_get_tx_status,
	.rx_status = ndesc_get_rx_status,
	.get_tx_len = ndesc_get_tx_len,
	.init_rx_desc = ndesc_init_rx_desc,
	.init_tx_desc = ndesc_init_tx_desc,
	.get_tx_owner = ndesc_get_tx_owner,
	.release_tx_desc = ndesc_release_tx_desc,
	.prepare_tx_desc = ndesc_prepare_tx_desc,
	.set_tx_ic = ndesc_set_tx_ic,
	.get_tx_ls = ndesc_get_tx_ls,
	.set_tx_owner = ndesc_set_tx_owner,
	.set_rx_owner = ndesc_set_rx_owner,
	.get_rx_frame_len = ndesc_get_rx_frame_len,
	.enable_tx_timestamp = ndesc_enable_tx_timestamp,
	.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
	.get_timestamp = ndesc_get_timestamp,
	.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
	.display_ring = ndesc_display_ring,
	.get_addr = ndesc_get_addr,
	.set_addr = ndesc_set_addr,
	.clear = ndesc_clear,
};