/*
 * Copyright(c) 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE   BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN   32
#define HFI1_VNIC_SDMA_DESC_WTRMRK 64
#define HFI1_VNIC_SDMA_RETRY_COUNT  1
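
/*
 * A Tx queue is Q_ACTIVE while its SDMA engine can accept descriptors
 * and Q_DEFERRED once hfi1_vnic_sdma_sleep() has queued it on the
 * engine's dmawait list; hfi1_vnic_sdma_wakeup() flips it back.
 */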

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 * @retry_count: tx retry count
 */
struct vnic_txreq {
	struct sdma_txreq txreq;
	struct hfi1_vnic_sdma *sdma;

	struct sk_buff *skb;
	unsigned char pad[HFI1_VNIC_MAX_PAD];
	u16 plen;
	__le64 pbc_val;

	u32 retry_count;
};
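
/*
 * SDMA completion callback: unmap the descriptor resources, free the
 * skb and return the txreq to its cache. Called for both successful
 * and aborted sends.
 */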
static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	/* add the linear (header) portion of the skb */
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	/* add each page fragment of the skb */
	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically continuous fragments later? */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      frag->page_offset,
				      skb_frag_size(frag));
		if (unlikely(ret))
			goto bail_txadd;
	}

	/* add the tail padding, kept at the end of the pad buffer */
	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2; /* PBC */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}

/* setup the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;
	tx->retry_count = 0;

	ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq);
	/* When -ECOMM, sdma callback will be called with ABORT status */
	if (unlikely(ret && ret != -ECOMM))
		goto free_desc;

	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	return ret;
}
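
/*
 * Note: on -EBUSY the skb is intentionally not freed above; ownership
 * stays with the caller, which is expected to stop the Tx subqueue and
 * resubmit the skb once hfi1_vnic_sdma_wakeup() marks the queue active.
 */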

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list to be woken up when descriptors
 * become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait *wait,
				struct sdma_txreq *txreq,
				unsigned int seq)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);

	/* retry a bounded number of times while the engine makes progress */
	if (sdma_progress(sde, seq, txreq))
		if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
			return -EAGAIN;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&vnic_sdma->wait.list))
		list_add_tail(&vnic_sdma->wait.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
}

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}
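
/*
 * Example caller (a sketch, not part of this file): a vnic netdev xmit
 * handler would typically pair the write-avail check with the send, e.g.:
 *
 *	if (unlikely(!hfi1_vnic_sdma_write_avail(vinfo, q_idx)))
 *		netif_stop_subqueue(vinfo->netdev, q_idx);
 *	err = hfi1_vnic_send_dma(dd, q_idx, vinfo, skb, pbc, plen);
 *	if (err == -EBUSY)
 *		netif_stop_subqueue(vinfo->netdev, q_idx);
 *
 * The names vinfo, q_idx, pbc and plen are the parameters shown above.
 */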

void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL);
		/* Tx queue i maps onto sdma engine i */
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			list_add_tail(&vnic_sdma->stx.list,
				      &vnic_sdma->wait.tx_head);
		}
	}
}

static void hfi1_vnic_txreq_kmem_cache_ctor(void *obj)
{
	struct vnic_txreq *tx = (struct vnic_txreq *)obj;

	memset(tx, 0, sizeof(*tx));
}
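
/*
 * Note: a kmem_cache constructor runs when an object is first created in
 * the cache, not on every kmem_cache_alloc(), so a reused (previously
 * freed) txreq is not re-zeroed here.
 */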

int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 hfi1_vnic_txreq_kmem_cache_ctor);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;
	return 0;
}

void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}