1
2
3
4
5
6
7
8#include <linux/skbuff.h>
9
10#include "mlxbf_gige.h"
11#include "mlxbf_gige_regs.h"
12
13
14
15
16
17int mlxbf_gige_tx_init(struct mlxbf_gige *priv)
18{
19 size_t size;
20
21 size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
22 priv->tx_wqe_base = dma_alloc_coherent(priv->dev, size,
23 &priv->tx_wqe_base_dma,
24 GFP_KERNEL);
25 if (!priv->tx_wqe_base)
26 return -ENOMEM;
27
28 priv->tx_wqe_next = priv->tx_wqe_base;
29
30
31 writeq(priv->tx_wqe_base_dma, priv->base + MLXBF_GIGE_TX_WQ_BASE);
32
33
34 priv->tx_cc = dma_alloc_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
35 &priv->tx_cc_dma, GFP_KERNEL);
36 if (!priv->tx_cc) {
37 dma_free_coherent(priv->dev, size,
38 priv->tx_wqe_base, priv->tx_wqe_base_dma);
39 return -ENOMEM;
40 }
41
42
43 writeq(priv->tx_cc_dma, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
44
45 writeq(ilog2(priv->tx_q_entries),
46 priv->base + MLXBF_GIGE_TX_WQ_SIZE_LOG2);
47
48 priv->prev_tx_ci = 0;
49 priv->tx_pi = 0;
50
51 return 0;
52}
53
54
55
56
57
58void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
59{
60 u64 *tx_wqe_addr;
61 size_t size;
62 int i;
63
64 tx_wqe_addr = priv->tx_wqe_base;
65
66 for (i = 0; i < priv->tx_q_entries; i++) {
67 if (priv->tx_skb[i]) {
68 dma_unmap_single(priv->dev, *tx_wqe_addr,
69 priv->tx_skb[i]->len, DMA_TO_DEVICE);
70 dev_kfree_skb(priv->tx_skb[i]);
71 priv->tx_skb[i] = NULL;
72 }
73 tx_wqe_addr += 2;
74 }
75
76 size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
77 dma_free_coherent(priv->dev, size,
78 priv->tx_wqe_base, priv->tx_wqe_base_dma);
79
80 dma_free_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
81 priv->tx_cc, priv->tx_cc_dma);
82
83 priv->tx_wqe_base = NULL;
84 priv->tx_wqe_base_dma = 0;
85 priv->tx_cc = NULL;
86 priv->tx_cc_dma = 0;
87 priv->tx_wqe_next = NULL;
88 writeq(0, priv->base + MLXBF_GIGE_TX_WQ_BASE);
89 writeq(0, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
90}
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
109{
110 unsigned long flags;
111 u16 avail;
112
113 spin_lock_irqsave(&priv->lock, flags);
114
115 if (priv->prev_tx_ci == priv->tx_pi)
116 avail = priv->tx_q_entries - 1;
117 else
118 avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
119 % priv->tx_q_entries) - 1;
120
121 spin_unlock_irqrestore(&priv->lock, flags);
122
123 return avail;
124}
125
/* Reap completed transmits: for every WQE the hardware consumer index
 * has moved past since the last call, account the packet in netdev
 * stats, unmap its DMA buffer (address in WQE word 0) and release the
 * skb. Re-wakes the TX queue if it was stopped and space is available
 * again. Returns true unconditionally (caller treats the event as
 * handled).
 */
bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
{
	struct net_device_stats *stats;
	u16 tx_wqe_index;
	u64 *tx_wqe_addr;
	u64 tx_status;
	u16 tx_ci;

	/* Count data-FIFO-full events for the driver's private stats */
	tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
	if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
		priv->stats.tx_fifo_full++;
	tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
	stats = &priv->netdev->stats;

	/* prev_tx_ci and tx_ci are free-running u16 counters; walk every
	 * entry consumed since the previous pass. The ring slot for each
	 * counter value is recovered by the modulo below.
	 */
	for (; priv->prev_tx_ci != tx_ci; priv->prev_tx_ci++) {
		tx_wqe_index = priv->prev_tx_ci % priv->tx_q_entries;

		/* Point at this entry's WQE; word 0 is the buffer DMA
		 * address, word 1 carries the packet length field read
		 * via MLXBF_GIGE_TX_WQE_PKT_LEN() below.
		 */
		tx_wqe_addr = priv->tx_wqe_base +
			      (tx_wqe_index * MLXBF_GIGE_TX_WQE_SZ_QWORDS);

		stats->tx_packets++;
		stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);

		dma_unmap_single(priv->dev, *tx_wqe_addr,
				 priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
		dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
		priv->tx_skb[tx_wqe_index] = NULL;

		/* Full barrier between releasing this slot and the loop
		 * increment that publishes the new prev_tx_ci.
		 * NOTE(review): presumably orders the tx_skb[] clear
		 * against the xmit path reusing the slot — confirm.
		 */
		mb();
	}

	/* The ring just drained; if the stack had been throttled by
	 * netif_stop_queue() in the xmit path, wake it once at least
	 * one WQE is free again.
	 */
	if (netif_queue_stopped(priv->netdev) &&
	    mlxbf_gige_tx_buffs_avail(priv))
		netif_wake_queue(priv->netdev);

	return true;
}
177
178
179void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
180{
181
182 priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
183
184
185
186 if (priv->tx_wqe_next == (priv->tx_wqe_base +
187 (priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
188 priv->tx_wqe_next = priv->tx_wqe_base;
189}
190
/* ndo_start_xmit handler: queue one skb on the TX WQE ring.
 *
 * The packet is linearized and must fit in one default-size buffer.
 * If the linear data straddles a DMA page boundary it is copied into
 * a freshly allocated, page-contained buffer (mlxbf_gige_alloc_skb);
 * otherwise the skb data is DMA-mapped in place. A WQE is then built
 * (word 0 = buffer DMA address, word 1 = length), the slot is recorded
 * under priv->lock, and the producer index is written to hardware
 * unless more packets are queued behind this one (netdev_xmit_more).
 * Always returns NETDEV_TX_OK; failures drop the packet and bump
 * tx_dropped rather than requeueing.
 */
netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct mlxbf_gige *priv = netdev_priv(netdev);
	long buff_addr, start_dma_page, end_dma_page;
	struct sk_buff *tx_skb;
	dma_addr_t tx_buf_dma;
	unsigned long flags;
	u64 *tx_wqe_addr;
	u64 word2;

	/* Drop packets that are too large or cannot be linearized */
	if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Determine which DMA pages the payload starts and ends on */
	buff_addr = (long)skb->data;
	start_dma_page = buff_addr >> MLXBF_GIGE_DMA_PAGE_SHIFT;
	end_dma_page = (buff_addr + skb->len - 1) >> MLXBF_GIGE_DMA_PAGE_SHIFT;

	/* Payload crossing a DMA page boundary cannot be mapped in
	 * place; bounce it into a page-contained buffer instead.
	 * NOTE(review): presumably a hardware DMA restriction — confirm
	 * against the MAC spec.
	 */
	if (start_dma_page != end_dma_page) {
		/* Allocate a replacement buffer that is already mapped */
		tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
					      &tx_buf_dma, DMA_TO_DEVICE);
		if (!tx_skb) {
			/* Allocation failed: drop, don't requeue */
			dev_kfree_skb(skb);
			netdev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		skb_put_data(tx_skb, skb->data, skb->len);

		/* Original skb is no longer needed once copied */
		dev_kfree_skb(skb);
	} else {
		tx_skb = skb;
		tx_buf_dma = dma_map_single(priv->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, tx_buf_dma)) {
			dev_kfree_skb(skb);
			netdev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	/* Claim the next free WQE... */
	tx_wqe_addr = priv->tx_wqe_next;
	/* ...and advance the ring pointer (with wrap) past it */
	mlxbf_gige_update_tx_wqe_next(priv);

	/* WQE word 0: buffer DMA address */
	*tx_wqe_addr = tx_buf_dma;

	/* WQE word 1: packet length (masked to the HW field width).
	 * Other word-1 fields are left zero.
	 */
	word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;

	*(tx_wqe_addr + 1) = word2;

	/* Record the skb for the completion path under the same lock
	 * that tx_buffs_avail() uses to snapshot pi/ci.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
	priv->tx_pi++;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!netdev_xmit_more()) {
		/* WQE writes must be visible before the doorbell */
		wmb();
		writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
	}

	/* Ring full: throttle the stack until completions free a slot */
	if (!mlxbf_gige_tx_buffs_avail(priv)) {
		/* Stop first, then schedule NAPI so the completion
		 * handler can reap finished WQEs and re-wake the queue.
		 */
		netif_stop_queue(netdev);

		napi_schedule(&priv->napi);
	}

	return NETDEV_TX_OK;
}
285