// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        /*
         * If hardware cannot enqueue more requests
         * and retry mechanism is not supported
         * then make sure we are completing the
         * current request
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
                        finalize_req = true;
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        if (finalize_req || engine->retry_support) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (enginectx->op.prepare_request &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
        }
        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling the device then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* A request was dequeued; prepare the hardware if it was idle */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_2;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err_2;
                }
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }

        ret = enginectx->op.do_one_request(engine, async_req);

        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
                /*
                 * If hardware queue is full (-ENOSPC), requeue request
                 * regardless of backlog flag.
                 * Otherwise, unprepare and complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }

                /*
                 * If retry mechanism is supported,
                 * unprepare the current request and
                 * enqueue it back into the crypto-engine queue.
                 */
                if (enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine,
                                                              async_req);
                        if (ret)
                                dev_err(engine->dev,
                                        "failed to unprepare request\n");
                }

                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If hardware was unable to execute the request, enqueue it
                 * back in front of the crypto-engine queue, to keep the order
                 * of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        if (enginectx->op.unprepare_request) {
                ret = enginectx->op.unprepare_request(engine, async_req);
                if (ret)
                        dev_err(engine->dev, "failed to unprepare request\n");
        }

req_err_2:
        async_req->complete(async_req, ret);

retry:
        /* If retry mechanism is supported, send new requests to engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batch requests are possible only if
         * hardware can enqueue multiple requests
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}
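
/*
 * Driver-side view of the callbacks invoked above. A minimal, hypothetical
 * sketch (my_ctx, my_dev and my_hw_kick_dma are placeholder names, not part
 * of this framework): the transform context must embed struct
 * crypto_engine_ctx as its *first* member, because crypto_finalize_request()
 * and crypto_pump_requests() recover it via crypto_tfm_ctx(req->tfm).
 *
 *	struct my_ctx {
 *		struct crypto_engine_ctx enginectx;	// must stay first
 *		struct my_dev *dd;
 *	};
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *		struct my_ctx *ctx =
 *			crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 *
 *		// Start the hardware; the request completes asynchronously
 *		// via crypto_finalize_skcipher_request() from the IRQ path.
 *		return my_hw_kick_dma(ctx->dd, req);
 *	}
 *
 *	static int my_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.do_one_request = my_do_one_request;
 *		ctx->enginectx.op.prepare_request = NULL;	// both optional
 *		ctx->enginectx.op.unprepare_request = NULL;
 *		return 0;
 *	}
 */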

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 * @need_pump: indicates whether the request pump should be queued to the
 *             kthread worker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the engine
 * queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                          struct kpp_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
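
/*
 * Typical submission path, as a hedged sketch (my_skcipher_encrypt, my_ctx
 * and its dd->engine pointer are illustrative, not part of this API): an
 * algorithm's encrypt()/decrypt() entry point simply hands the request to
 * the engine and propagates the queue status (usually -EINPROGRESS, or
 * -EBUSY for a backlogged request).
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->dd->engine,
 *								  req);
 *	}
 */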

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                 struct kpp_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
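
/*
 * Completion side, as a hypothetical sketch (my_dev, my_irq and the dd->req
 * field are placeholders): when the hardware signals that the current
 * request is done, the driver finalizes it, which completes the request and
 * re-queues the pump so the next queued request gets processed.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *		struct skcipher_request *req = dd->req;
 *
 *		dd->req = NULL;
 *		crypto_finalize_skcipher_request(dd->engine, req, 0);
 *		return IRQ_HANDLED;
 *	}
 */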

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while so that the queued requests can be drained.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;

        /*
         * Batch requests are possible only if
         * hardware has support for the retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
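
/*
 * Illustrative use of the extended allocator (my_dev, my_hw_flush and the
 * drvdata layout are assumptions, not part of this API): hardware that can
 * hold several requests at once enables retry support, so that -ENOSPC from
 * ->do_one_request() re-queues the request instead of failing it, and
 * supplies a batch callback that crypto_pump_requests() invokes once the
 * software queue has been drained.
 *
 *	static int my_do_batch(struct crypto_engine *engine)
 *	{
 *		struct my_dev *dd = dev_get_drvdata(engine->dev);
 *
 *		return my_hw_flush(dd);	// push all buffered descriptors
 *	}
 *
 *	dd->engine = crypto_engine_alloc_init_and_set(dev, true, my_do_batch,
 *						      false,
 *						      CRYPTO_ENGINE_MAX_QLEN);
 */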

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
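
/*
 * Putting the lifecycle together, a minimal hypothetical probe/remove pair
 * (my_probe, my_remove and struct my_dev are placeholders):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd;
 *		int ret;
 *
 *		dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
 *		if (!dd)
 *			return -ENOMEM;
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		ret = crypto_engine_start(dd->engine);
 *		if (ret) {
 *			crypto_engine_exit(dd->engine);
 *			return ret;
 *		}
 *
 *		platform_set_drvdata(pdev, dd);
 *		return 0;	// then register algorithms with the crypto API
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		crypto_engine_exit(dd->engine);
 *		return 0;
 *	}
 */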

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");