// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 */
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If hardware cannot enqueue more requests
	 * and retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute the request, enqueue it
		 * back in front of the crypto-engine queue, to keep the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}
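
/*
 * Illustrative sketch (not part of this file): crypto_pump_requests()
 * finds the driver's callbacks by treating the context returned by
 * crypto_tfm_ctx() as a struct crypto_engine_ctx, so drivers using this
 * framework place one at the very start of their transform context.
 * "my_tfm_ctx", "my_init_tfm" and "my_do_one_request" are hypothetical
 * names.
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *		// driver-private state follows
 *	};
 *
 *	static int my_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.do_one_request = my_do_one_request;
 *		ctx->enginectx.op.prepare_request = NULL;
 *		ctx->enginectx.op.unprepare_request = NULL;
 *		return 0;
 *	}
 */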

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 * @need_pump: if true, queue the pump work on the engine's kthread worker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the engine
 * queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request to the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request to the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
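
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * an skcipher .encrypt hook typically does no work itself and simply
 * hands the request to the engine; the engine later invokes the driver's
 * do_one_request callback. "my_dev", "my_get_dev" and
 * "my_skcipher_encrypt" are assumed names.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = my_get_dev(req);	// hypothetical lookup
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 */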

/**
 * crypto_finalize_aead_request - finalize one aead_request if the request
 * is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if the
 * request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if the request
 * is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if the
 * request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
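
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * when the hardware signals completion, e.g. from an IRQ handler or a
 * completion tasklet, the driver reports the result back to the engine,
 * which completes the request and pumps the next one. "my_irq_done" and
 * the saved dd->req pointer are assumed names.
 *
 *	static void my_irq_done(struct my_dev *dd, int err)
 *	{
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *	}
 */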

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait for
	 * up to 10 seconds (500 iterations of 20 ms) for the in-flight
	 * requests to drain.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether the hardware has support for the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests, with the form:
 *                callback(struct crypto_engine *engine)
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;

	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
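
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver whose hardware queue can hold several requests might create
 * its engine with retry support and a batch callback. "my_do_batch" and
 * the queue length of 32 are assumed values.
 *
 *	engine = crypto_engine_alloc_init_and_set(dev, true, my_do_batch,
 *						  false, 32);
 *	if (!engine)
 *		return -ENOMEM;
 *	return crypto_engine_start(engine);
 */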

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it
 * @dev: the device attached with one hardware engine
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
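
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * typical probe-time setup with the default queue length, running the
 * request pump at realtime priority. "my_probe" is an assumed name.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct crypto_engine *engine;
 *
 *		engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(engine);
 *	}
 */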

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine to be freed
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
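
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the matching teardown at remove time. The engine structure itself is
 * devm-allocated, so only the kworker needs explicit destruction, which
 * crypto_engine_exit() handles. "my_remove" is an assumed name.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 */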

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");