#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_common.h"
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* minimum user defined data (UDD) length carried in the request header */
#define MIN_UDD_LEN 16
/* front data size placed in the 64-byte instruction */
#define FDATA_SIZE 32
/* solicited response destination ports start at this base */
#define SOLICIT_BASE_DPORT 256

/* software request states */
#define REQ_NOT_POSTED 1
#define REQ_BACKLOG 2
#define REQ_POSTED 3
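
/*
 * incr_index - advance a ring index by @count slots, wrapping around at
 * @max. For example, with max = 8: incr_index(6, 3, 8) returns 1.
 */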
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}

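/*
 * softreq_unmap_sgbufs - unmap the input/output scatterlists and the
 * DMA-mapped SG component buffers of a software request, then release
 * the component memory.
 */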
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);

	/* unmap the input scatterlist and its SG component */
	dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->in.sgcomp);
	sr->in.sg = NULL;
	sr->in.sgmap_cnt = 0;

	/* unmap the output scatterlist and its SG component */
	dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->out.sgcomp);
	sr->out.sg = NULL;
	sr->out.sgmap_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}
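
/*
 * create_sg_component - build the hardware SG component list for a
 * DMA-mapped scatterlist and map it for the device.
 *
 * Each nitrox_sgcomp entry describes up to four scatterlist segments:
 * four 16-bit lengths followed by four 64-bit DMA pointers, all in
 * big-endian byte order, as filled in by the loop below.
 *
 * Returns 0 on success or -ENOMEM on allocation/mapping failure.
 */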
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct scatterlist *sg;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	/* each component holds up to 4 DMA pointers */
	nr_sgcomp = roundup(map_nents, 4) / 4;

	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;

	sg = sgtbl->sg;
	/* populate the device SG component entries */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4 && sg; j++) {
			sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
			sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
			sg = sg_next(sg);
		}
	}

	/* map the component list itself for the device */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->sgcomp_dma = dma;
	sgtbl->sgcomp_len = sz_comp;

	return 0;
}
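
/*
 * dma_map_inbufs - DMA map the request source scatterlist, record the
 * total input byte count and create the input SG component list.
 *
 * Returns 0 on success or a negative errno; on failure the source
 * scatterlist is unmapped again.
 */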
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg;
	int i, nents, ret = 0;

	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	for_each_sg(req->src, sg, nents, i)
		sr->in.total_bytes += sg_dma_len(sg);

	sr->in.sg = req->src;
	sr->in.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
	sr->in.sgmap_cnt = 0;
	return ret;
}

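/*
 * dma_map_outbufs - DMA map the request destination scatterlist and
 * create the output SG component list.
 */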
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	int nents, ret = 0;

	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	sr->out.sg = req->dst;
	sr->out.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
	sr->out.sgmap_cnt = 0;
	sr->out.sg = NULL;
	return ret;
}

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

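/*
 * Backlog and response list helpers. Requests that cannot be posted
 * because the command queue is full are parked on the per-queue backlog
 * list; posted requests are tracked on the response list until their
 * completion is processed.
 */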
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_qlock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->resp_qlock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

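/*
 * cmdq_full - reserve a slot in the command queue. The pending count is
 * incremented optimistically; if it exceeds the queue length the
 * reservation is dropped again and true (queue full) is returned.
 */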
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		/* back off the reservation, the queue is full */
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	/* sync with other cpus */
	smp_mb__after_atomic();
	return false;
}
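
/*
 * post_se_instr - copy the instruction into the next free slot of the
 * packet input ring, move the request to the response list and ring the
 * doorbell so the SE core picks it up.
 */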
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmd_qlock);

	idx = cmdq->write_idx;
	/* copy the instruction into the ring entry */
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;
	/* flush the command queue update before ringing the doorbell */
	dma_wmb();

	/* ring the doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmd_qlock);

	/* update posted statistics */
	atomic64_inc(&ndev->stats.posted);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_qlock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		/* submit until there is no space left in the ring */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}

		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		/* post the backlogged command */
		post_se_instr(sr, cmdq);
	}
	spin_unlock_bh(&cmdq->backlog_qlock);

	return ret;
}

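/*
 * nitrox_enqueue_request - flush any backlogged commands, then post this
 * request. If the queue is full the request is either backlogged (when
 * CRYPTO_TFM_REQ_MAY_BACKLOG is set) or dropped with -ENOSPC.
 * Returns -EINPROGRESS when the request was posted or backlogged.
 */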
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post any backlogged commands first */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* cannot backlog, drop the request */
			atomic64_inc(&ndev->stats.dropped);
			return -ENOSPC;
		}
		/* park the request on the backlog list */
		backlog_list_add(sr, cmdq);
		return -EINPROGRESS;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}
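
/**
 * nitrox_process_se_request - build and post a request to the SE cores
 * @ndev: NITROX device
 * @req: crypto request to submit
 * @callback: completion callback
 * @cb_arg: argument passed to the completion callback
 *
 * Returns -EINPROGRESS if the request was posted or backlogged,
 * otherwise a negative errno.
 */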
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      void *cb_arg)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->cb_arg = cb_arg;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	sr->resp.orh = req->orh;
	sr->resp.completion = req->comp;

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the device DMA address of the crypto context */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the command queue based on the current CPU */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_inq[qno];

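	/*
	 * Fill the 64-byte instruction: the gather list pointer (DPTR0),
	 * the packet input instruction header (ih), the input request
	 * header (irh) carrying context and opcode information, the
	 * solicited completion words (slc) pointing at the scatter list,
	 * and the front data holding the GP header.
	 */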
	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

	/* instruction header */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.bev = cpu_to_be64(sr->instr.ih.value);

	/* input request header */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* destination port = solicited base port + queue number */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.bev[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* context pointer */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* solicited completion header */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
	sr->instr.slc.bev[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* scatter (response) list pointer */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

	/* front data: the GP header goes into the payload unchanged */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;

	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

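/*
 * sr_completed - check whether the device has written back the ORH and
 * completion words for this request. If the ORH already carries an
 * error code the request is done; otherwise the completion word is
 * polled for roughly one millisecond before giving up.
 */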
static bool sr_completed(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	if ((orh != PENDING_SIG) && (orh & 0xff))
		return true;

	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
		if (time_after(jiffies, timeout)) {
			pr_err("comp not done\n");
			return false;
		}
	}

	return true;
}
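
/*
 * process_response_list - walk the response list of a command queue and
 * complete requests whose ORH/completion words have been written back,
 * up to the number of currently pending commands. Timed-out requests
 * are completed with their ORH error code after a rate-limited warning.
 */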
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	int req_completed = 0, err = 0, budget;
	completion_t callback;
	void *cb_arg;

	/* check at most the currently pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check the ORH and completion word updates */
		if (!sr_completed(sr)) {
			/* request not yet completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(*sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		atomic64_inc(&ndev->stats.completed);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from the response list */
		response_list_del(sr, cmdq);
		/* ORH error code */
		err = READ_ONCE(*sr->resp.orh) & 0xff;
		callback = sr->callback;
		cb_arg = sr->cb_arg;
		softreq_destroy(sr);
		if (callback)
			callback(cb_arg, err);

		req_completed++;
	}
}
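
/*
 * pkt_slc_resp_tasklet - post processing of SE responses. Reads the SLC
 * completion count for the queue, processes the response list, then
 * writes the count back with the resend bit set and flushes any
 * backlogged commands from a workqueue.
 */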
void pkt_slc_resp_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = qvec->cmdq;
	union nps_pkt_slc_cnts slc_cnts;

	/* read the completion count */
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* request the interrupt to be resent if more work remains */
	slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/* write the count back with the resend bit enabled */
	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}