#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "hfi1.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

/* The maximum number of data I/O vectors per message/request. */
#define MAX_VECTORS_PER_REQ 8

/*
 * Maximum number of packets to send from each message/request
 * before moving on to the next one.
 */
#define MAX_PKTS_PER_QUEUE 16

#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))

#define req_opcode(x) \
	(((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_version(x) \
	(((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_VERSION_MASK)
#define req_iovcnt(x) \
	(((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)

/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull

/*
 * Define fields in the KDETH header so we can update the header
 * template.
 */
#define KDETH_OFFSET_SHIFT	0
#define KDETH_OFFSET_MASK	0x7fff
#define KDETH_OM_SHIFT		15
#define KDETH_OM_MASK		0x1
#define KDETH_TID_SHIFT		16
#define KDETH_TID_MASK		0x3ff
#define KDETH_TIDCTRL_SHIFT	26
#define KDETH_TIDCTRL_MASK	0x3
#define KDETH_INTR_SHIFT	28
#define KDETH_INTR_MASK		0x1
#define KDETH_SH_SHIFT		29
#define KDETH_SH_MASK		0x1
#define KDETH_HCRC_UPPER_SHIFT	16
#define KDETH_HCRC_UPPER_MASK	0xff
#define KDETH_HCRC_LOWER_SHIFT	24
#define KDETH_HCRC_LOWER_MASK	0xff

#define AHG_KDETH_INTR_SHIFT 12

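/*
 * Convert between the PBC packet-length field (in dwords, including
 * the one-dword PBC itself) and the LRH length (in bytes, excluding
 * the PBC). For example, a PBC length of 16 dwords corresponds to an
 * LRH length of (16 << 2) - 4 = 60 bytes, and vice versa.
 */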
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)

#define KDETH_GET(val, field) \
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
#define KDETH_SET(dw, field, val) do { \
		u32 dwval = le32_to_cpu(dw); \
		dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
		dwval |= (((val) & KDETH_##field##_MASK) << \
			  KDETH_##field##_SHIFT); \
		dw = cpu_to_le32(dwval); \
	} while (0)

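/*
 * Stage an AHG update descriptor into @arr at @idx. Note that on
 * overflow this macro does a bare "return -ERANGE" from whatever
 * function expands it, so it may only be used where that is safe.
 */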
#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
	do { \
		if ((idx) < ARRAY_SIZE((arr))) \
			(arr)[(idx++)] = sdma_build_ahg_descriptor( \
				(__force u16)(value), (dw), (bit), \
				(width)); \
		else \
			return -ERANGE; \
	} while (0)

/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL     4
#define KDETH_OM_LARGE     64
#define KDETH_OM_MAX_SIZE  (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))

/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)

/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE  2
#define SDMA_REQ_HAVE_AHG   3
#define SDMA_REQ_HAS_ERROR  4
#define SDMA_REQ_DONE_ERROR 5

/* Packet queue states */
#define SDMA_PKT_Q_INACTIVE BIT(0)
#define SDMA_PKT_Q_ACTIVE   BIT(1)
#define SDMA_PKT_Q_DEFERRED BIT(2)

/*
 * Maximum retry attempts to submit a TX request
 * before putting the process to sleep.
 */
#define MAX_DEFER_RETRY_COUNT 1

static unsigned initial_pkt_count = 8;

#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */

struct sdma_mmu_node;

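/*
 * One user data vector of an SDMA request, tracking the pinned pages
 * that back it and how much of it has been consumed so far.
 */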
struct user_sdma_iovec {
	struct list_head list;
	struct iovec iov;
	/* number of pages in this vector */
	unsigned npages;
	/* array of pinned pages for this vector */
	struct page **pages;
	/*
	 * offset into the virtual address space of the vector at
	 * which we last left off
	 */
	u64 offset;
	struct sdma_mmu_node *node;
};

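/*
 * A cached, pinned user buffer kept in the MMU rb tree so that
 * overlapping requests can reuse the page pins.
 */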
struct sdma_mmu_node {
	struct mmu_rb_node rb;
	struct hfi1_user_sdma_pkt_q *pq;
	atomic_t refcount;
	struct page **pages;
	unsigned npages;
};

/* evict operation argument */
struct evict_data {
	u32 cleared;	/* count evicted so far */
	u32 target;	/* target count to evict */
};

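/*
 * Per-request state: the user header template, destination engine,
 * data vectors, TID information, and the sequence counters that
 * track packet submission and completion.
 */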
struct user_sdma_request {
	struct sdma_req_info info;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	/* the original header from user space */
	struct hfi1_pkt_header hdr;
	/*
	 * Pointer to the SDMA engine for this request.
	 * Since different requests could be on different VLs,
	 * each request needs its own engine pointer.
	 */
	struct sdma_engine *sde;
	u8 ahg_idx;
	u32 ahg[9];
	/*
	 * KDETH.Offset (Eager) field
	 * We need to remember the initial value so the headers
	 * can be updated properly.
	 */
	u32 koffset;
	/*
	 * KDETH.OFFSET (TID) field
	 * The offset can cover multiple packets, depending on the
	 * size of the TID entry.
	 */
	u32 tidoffset;
	/*
	 * KDETH.OM
	 * Remembered separately because the header template always
	 * sets it to 0.
	 */
	u8 omfactor;
	/*
	 * We copy the iovs for this request (based on
	 * info.iovcnt). These are only the data vectors.
	 */
	unsigned data_iovs;
	/* total length of the data in the request */
	u32 data_len;
	/* progress index moving along the iovs array */
	unsigned iov_idx;
	struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
	/* number of elements copied to the tids array */
	u16 n_tids;
	/* TID array values copied from the tid_iov vector */
	u32 *tids;
	u16 tididx;
	u32 sent;
	u64 seqnum;
	u64 seqcomp;
	u64 seqsubmitted;
	struct list_head txps;
	unsigned long flags;
	/* status of the last txreq completed */
	int status;
};

/*
 * A single txreq could span up to 3 physical pages when the MTU
 * is sufficiently large (> 4K). Each of the IOV pointers also
 * needs its own set of flags so the vector has been handled
 * independently of each other.
 */
struct user_sdma_txreq {
	/* Packet header for the txreq */
	struct hfi1_pkt_header hdr;
	struct sdma_txreq txreq;
	struct list_head list;
	struct user_sdma_request *req;
	u16 flags;
	unsigned busycount;
	u64 seqnum;
};

#define SDMA_DBG(req, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
		  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
		  ##__VA_ARGS__)
#define SDMA_Q_DBG(pq, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
		  (pq)->subctxt, ##__VA_ARGS__)

static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
			    struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
			       unsigned);
static int check_header_template(struct user_sdma_request *,
				 struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
			    struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *,
				struct user_sdma_txreq *, u32);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
				  struct hfi1_user_sdma_comp_q *,
				  u16, enum hfi1_sdma_comp_state, int);
static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *,
	struct iowait *,
	struct sdma_txreq *,
	unsigned seq);
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(void *, struct mmu_rb_node *);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *, struct mmu_rb_node *);
static int sdma_rb_invalidate(void *, struct mmu_rb_node *);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};

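/*
 * Called by the SDMA engine when it cannot accept more descriptors.
 * Retry a few times while the engine is still making progress;
 * otherwise park the packet queue on the engine's dmawait list and
 * report -EBUSY so submission stops until we are woken again.
 */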
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	unsigned seq)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);

	if (sdma_progress(sde, seq, txreq)) {
		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
			goto eagain;
	}
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&pq->busy.list))
		list_add_tail(&pq->busy.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	return -EAGAIN;
}

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

static void sdma_kmem_cache_ctor(void *obj)
{
	struct user_sdma_txreq *tx = obj;

	memset(tx, 0, sizeof(*tx));
}

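/*
 * Allocate the per-context packet queue (pq) and completion queue
 * (cq) for user SDMA, set up the txreq slab cache, and register the
 * queue with the MMU notifier framework for pinned-page tracking.
 */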
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
{
	struct hfi1_filedata *fd;
	int ret = 0;
	unsigned memsize;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	if (!uctxt || !fp) {
		ret = -EBADF;
		goto done;
	}

	fd = fp->private_data;

	if (!hfi1_sdma_comp_ring_size) {
		ret = -EINVAL;
		goto done;
	}

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		goto pq_nomem;

	memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
	pq->reqs = kzalloc(memsize, GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	memsize = BITS_TO_LONGS(hfi1_sdma_comp_ring_size) * sizeof(long);
	pq->req_in_use = kzalloc(memsize, GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	INIT_LIST_HEAD(&pq->list);
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);
	pq->mm = fd->mm;

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue, NULL);
	pq->reqidx = 0;
	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    sdma_kmem_cache_ctor);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}
	fd->pq = pq;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size);
	cq->comps = vmalloc_user(memsize);
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;
	fd->cq = cq;

	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto done;
	}

	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
	spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
	goto done;

cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);
	fd->pq = NULL;
pq_nomem:
	ret = -ENOMEM;
done:
	return ret;
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
		  uctxt->ctxt, fd->subctxt);
	pq = fd->pq;
	if (pq) {
		if (pq->handler)
			hfi1_mmu_rb_unregister(pq->handler);
		spin_lock_irqsave(&uctxt->sdma_qlock, flags);
		if (!list_empty(&pq->list))
			list_del_init(&pq->list);
		spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
		fd->pq = NULL;
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

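/*
 * Hash a DLID to a small selector value, lazily handing out
 * round-robin selectors as new hash buckets are first seen. The
 * static state is not locked; a race here can only perturb which
 * selector a DLID maps to, not correctness.
 */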
static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}

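/*
 * Main entry point for a user SDMA request. Validates the request
 * info and header template, pins the user data vectors, copies any
 * TID entries, selects an SDMA engine, and then submits packets,
 * blocking until every packet of the request has been handed to the
 * engine (but not waiting for their completions).
 */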
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
				   unsigned long dim, unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	int req_queued = 0;
	u16 dlid;
	u32 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);

	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count. Need at least 1 vector
	 * (header) and it cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}

	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
		  uctxt->ctxt, fd->subctxt, info.comp_idx);
	req = pq->reqs + info.comp_idx;
	memset(req, 0, sizeof(*req));
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->pq = pq;
	req->cq = cq;
	req->status = -1;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* expected must have a TID info and at least one data vector */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	    USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
			      PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH
	 * then the RXE parsing will be off and will land in the middle of
	 * the KDETH or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->status = ret;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	SDMA_DBG(req, "total data length %u", req->data_len);

	if (pcount > req->info.npkts)
		pcount = req->info.npkts;

	/*
	 * Copy any TID info
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * setup. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}
		req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
		if (!req->tids) {
			ret = -ENOMEM;
			goto free_req;
		}
		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		ret = copy_from_user(req->tids, iovec[idx].iov_base,
				     ntids * sizeof(*req->tids));
		if (ret) {
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			ret = -EFAULT;
			goto free_req;
		}
		req->n_tids = ntids;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
		int ahg = sdma_ahg_alloc(req->sde);

		if (likely(ahg >= 0)) {
			req->ahg_idx = (u8)ahg;
			set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
		}
	}

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	atomic_inc(&pq->n_reqs);
	req_queued = 1;
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY)) {
		req->status = ret;
		goto free_req;
	}

	/*
	 * It is possible that the SDMA engine would have processed all the
	 * submitted packets by the time we get here. Therefore, only set
	 * packet queue state to ACTIVE if there are still uncompleted
	 * requests.
	 */
	if (atomic_read(&pq->n_reqs))
		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY) {
				req->status = ret;
				set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
				if (ACCESS_ONCE(req->seqcomp) ==
				    req->seqsubmitted - 1)
					goto free_req;
				return ret;
			}
			wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				(pq->state == SDMA_PKT_Q_ACTIVE),
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
		}
	}
	*count += idx;
	return 0;
free_req:
	user_sdma_free_request(req, true);
	if (req_queued)
		pq_update(pq);
	set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4
	 * bytes, therefore, when the data length request is less than 4
	 * bytes, there's only one packet, and the packet data length is
	 * equal to that of the request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	SDMA_DBG(req, "Data Length = %u", len);
	return len;
}

static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}

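/*
 * Build and submit up to @maxpkts packets for @req: allocate a txreq,
 * size its payload, attach the (possibly AHG-updated) header and the
 * user pages, then hand the whole list to the SDMA engine.
 */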
static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
	int ret = 0, count;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
		set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
		return -EFAULT;
	}

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
			set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
			return -EFAULT;
		}

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		tx->busycount = 0;
		INIT_LIST_HEAD(&tx->list);

		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_txreq;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			}
		}

		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
			if (!req->seqnum) {
				u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
				u32 lrhlen = get_lrh_len(req->hdr,
							 pad_len(datalen));
				/*
				 * Copy the request header into the tx header
				 * because the HW needs a cacheline-aligned
				 * address.
				 * This copy can be optimized out if the hdr
				 * member of user_sdma_request were also
				 * cacheline aligned.
				 */
				memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
				if (PBC2LRH(pbclen) != lrhlen) {
					pbclen = (pbclen & 0xf000) |
						LRH2PBC(lrhlen);
					tx->hdr.pbc[0] = cpu_to_le16(pbclen);
				}
				ret = sdma_txinit_ahg(&tx->txreq,
						      SDMA_TXREQ_F_AHG_COPY,
						      sizeof(tx->hdr) + datalen,
						      req->ahg_idx, 0, NULL, 0,
						      user_sdma_txreq_cb);
				if (ret)
					goto free_tx;
				ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
							&tx->hdr,
							sizeof(tx->hdr));
				if (ret)
					goto free_txreq;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0)
					goto free_tx;
				sdma_txinit_ahg(&tx->txreq,
						SDMA_TXREQ_F_USE_AHG,
						datalen, req->ahg_idx, changes,
						req->ahg, sizeof(req->hdr),
						user_sdma_txreq_cb);
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * Add pages from the data vector(s) until the packet
		 * payload is complete.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			unsigned long base, offset;
			unsigned pageidx, len;

			base = (unsigned long)iovec->iov.iov_base;
			offset = offset_in_page(base + iovec->offset +
						iov_offset);
			pageidx = (((iovec->offset + iov_offset +
				     base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
			len = offset + req->info.fragsize > PAGE_SIZE ?
				PAGE_SIZE - offset : req->info.fragsize;
			len = min((datalen - queued), len);
			ret = sdma_txadd_page(pq->dd, &tx->txreq,
					      iovec->pages[pageidx],
					      offset, len);
			if (ret) {
				SDMA_DBG(req, "SDMA txreq add page failed %d\n",
					 ret);
				goto free_txreq;
			}
			iov_offset += len;
			queued += len;
			data_sent += len;
			if (unlikely(queued < datalen &&
				     pageidx == iovec->npages &&
				     req->iov_idx < req->data_iovs - 1)) {
				iovec->offset += iov_offset;
				iovec = &req->iovs[++req->iov_idx];
				iov_offset = 0;
			}
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len)
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		set_bit(SDMA_REQ_SEND_DONE, &req->flags);
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
			sdma_ahg_free(req->sde, req->ahg_idx);
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}

/*
 * How many pages in this iovec element?
 */
static inline int num_user_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long)iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
}

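/*
 * Pin (or reuse already-pinned) user pages backing @iovec. A cached
 * node from the MMU rb tree is extended if it does not cover the
 * whole vector, evicting other cached pins if the pinned-page limit
 * would otherwise be exceeded.
 */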
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0, pinned, npages, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;

	rb_node = hfi1_mmu_rb_extract(pq->handler,
				      (unsigned long)iovec->iov.iov_base,
				      iovec->iov.iov_len);
	if (rb_node)
		node = container_of(rb_node, struct sdma_mmu_node, rb);

	if (!node) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		node->pq = pq;
		atomic_set(&node->refcount, 0);
	}

	npages = num_user_pages(&iovec->iov);
	if (node->npages < npages) {
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			SDMA_DBG(req, "Failed page array alloc");
			ret = -ENOMEM;
			goto bail;
		}
		memcpy(pages, node->pages, node->npages * sizeof(*pages));

		npages -= node->npages;

retry:
		if (!hfi1_can_pin_pages(pq->dd, pq->mm,
					atomic_read(&pq->n_locked), npages)) {
			cleared = sdma_cache_evict(pq, npages);
			if (cleared >= npages)
				goto retry;
		}
		pinned = hfi1_acquire_user_pages(pq->mm,
			((unsigned long)iovec->iov.iov_base +
			 (node->npages * PAGE_SIZE)), npages, 0,
			pages + node->npages);
		if (pinned < 0) {
			kfree(pages);
			ret = pinned;
			goto bail;
		}
		if (pinned != npages) {
			unpin_vector_pages(pq->mm, pages, node->npages,
					   pinned);
			ret = -EFAULT;
			goto bail;
		}
		kfree(node->pages);
		node->rb.len = iovec->iov.iov_len;
		node->pages = pages;
		node->npages += pinned;
		npages = node->npages;
		atomic_add(pinned, &pq->n_locked);
	}
	iovec->pages = node->pages;
	iovec->npages = npages;
	iovec->node = node;

	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
	if (ret) {
		atomic_sub(node->npages, &pq->n_locked);
		iovec->node = NULL;
		goto bail;
	}
	return 0;
bail:
	if (rb_node)
		unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
	kfree(node);
	return ret;
}

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}

static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{
	/*
	 * Perform safety checks for any type of packet:
	 *    - transfer size is multiple of 64 bytes
	 *    - packet length is multiple of 4 bytes
	 *    - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
		return -EINVAL;

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
			tidoff;
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			 KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 *    - offset is not larger than the TID size
		 *    - TIDCtrl values match between header and TID array
		 *    - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
			return -EINVAL;
	}
	return 0;
}

/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}

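/*
 * Prepare the header of a single packet from the request's header
 * template: fix up the packet-length fields, advance the BTH.PSN,
 * update KDETH offsets, and attach the finished header to the txreq.
 */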
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * Third packet
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet.
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH only on the last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
			 req->tidoffset, req->tidoffset / req->omfactor,
			 req->omfactor != KDETH_OM_SMALL);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset / req->omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  req->omfactor != KDETH_OM_SMALL);
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}

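/*
 * Like set_txreq_header(), but instead of rewriting a full header it
 * builds a list of AHG (automatic header generation) descriptors that
 * tell the hardware which header fields to patch for this packet.
 * Returns the number of descriptors used, or a negative errno.
 */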
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len)
{
	int diff = 0;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len));

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
			       cpu_to_le16(LRH2PBC(lrhlen)));
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
			       cpu_to_be16(lrhlen >> 2));
	}

	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		val32 |= 1UL << 31;
	AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
	AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
	/* KDETH.Offset */
	AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
		       cpu_to_le16(req->koffset & 0xffff));
	AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
		       cpu_to_le16(req->koffset >> 16));
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		req->omfactor = ((EXP_TID_GET(tidval, LEN) *
				  PAGE_SIZE) >=
				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
			KDETH_OM_SMALL;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
			       ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
				((req->tidoffset / req->omfactor) & 0x7fff)));
		/* KDETH.TIDCtrl, KDETH.TID */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));
		/* Clear KDETH.SH on last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
			val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
						     INTR) <<
					   AHG_KDETH_INTR_SHIFT);
			val &= cpu_to_le16(~(1U << 13));
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
		} else {
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
		}
	}

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, req->ahg, diff, tidval);
	return diff;
}

/*
 * SDMA tx request completion callback. Called when the SDMA progress
 * state machine gets notification that the SDMA descriptors for this
 * tx request have been processed by the DMA engine. Called in
 * interrupt context.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	u16 idx;

	if (!tx->req)
		return;

	req = tx->req;
	pq = req->pq;
	cq = req->cq;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
			 status);
		set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);
	tx = NULL;

	idx = req->info.comp_idx;
	if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
		if (req->seqcomp == req->info.npkts - 1) {
			req->status = 0;
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, COMPLETE, 0);
		}
	} else {
		if (status != SDMA_TXREQ_S_OK)
			req->status = status;
		if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
		    (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
		     test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, ERROR, req->status);
		}
	}
}

static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs)) {
		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
		wake_up(&pq->wait);
	}
}

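/*
 * Clean up all resources of a request: queued txreqs, the TID array,
 * and the iovec page pins (either removing the cache nodes outright
 * or just dropping their reference counts), then release the
 * completion-ring slot.
 */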
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}
	if (req->data_iovs) {
		struct sdma_mmu_node *node;
		int i;

		for (i = 0; i < req->data_iovs; i++) {
			node = req->iovs[i].node;
			if (!node)
				continue;

			if (unpin)
				hfi1_mmu_rb_remove(req->pq->handler,
						   &node->rb);
			else
				atomic_dec(&node->refcount);
		}
	}
	kfree(req->tids);
	clear_bit(req->info.comp_idx, req->pq->req_in_use);
}

static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
		  pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
	cq->comps[idx].status = state;
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
					idx, state, ret);
}

static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}

static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);
	return 0;
}

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* is this node still being used? */
	if (atomic_read(&node->refcount))
		return 0; /* keep this node */

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)
		*stop = true;

	return 1; /* remove this node */
}

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_sub(node->npages, &node->pq->n_locked);

	unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);

	kfree(node);
}

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))
		return 1;
	return 0;
}