/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from the engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * Checks whether there is any request in the engine queue that needs
 * processing and, if so, calls out to the driver to initialize the hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ablkcipher_request *req;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling, let it finish first */
	if (engine->idling) {
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do the hardware teardown from the pump thread */
		if (!in_kthread) {
			queue_kthread_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	req = ablkcipher_request_cast(async_req);

	engine->cur_req = req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* The request was dequeued successfully; bring up the hardware */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	if (engine->prepare_request) {
		ret = engine->prepare_request(engine, engine->cur_req);
		if (ret) {
			pr_err("failed to prepare request: %d\n", ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}

	ret = engine->crypt_one_request(engine, engine->cur_req);
	if (ret) {
		pr_err("failed to crypt one request from queue\n");
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, engine->cur_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: whether to kick the request pump after enqueueing
 *
 * Return: -ESHUTDOWN if the engine is stopped, else the enqueue status
 * from the crypto queue.
 */
int crypto_transfer_request(struct crypto_engine *engine,
			    struct ablkcipher_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		queue_kthread_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request to the engine
 * queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
				      struct ablkcipher_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
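
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * ablkcipher .encrypt callback would typically just hand the request over
 * to its engine. The mydrv_* names and the way the driver looks up its
 * device data are hypothetical.
 *
 *	static int mydrv_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct mydrv_dev *dd = mydrv_get_dev(req);
 *
 *		return crypto_transfer_request_to_engine(dd->engine, req);
 *	}
 *
 * The engine later calls back into the driver through prepare_request()
 * and crypt_one_request() from the pump thread.
 */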

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number passed to the request's completion callback
 */
void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared && engine->unprepare_request) {
			ret = engine->unprepare_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);
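
/*
 * Usage sketch (illustrative only): the driver reports completion from its
 * done path, e.g. a DMA callback or interrupt bottom half, once the
 * hardware has finished the current request. The mydrv_* names and the
 * dd->req/dd->err bookkeeping are hypothetical.
 *
 *	static void mydrv_done_task(unsigned long data)
 *	{
 *		struct mydrv_dev *dd = (struct mydrv_dev *)data;
 *
 *		crypto_finalize_request(dd->engine, dd->req, dd->err);
 *	}
 */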

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * Wait up to 500 * 20ms (10 seconds) for the queue to drain and
	 * the engine to go idle before marking it as stopped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate a crypto hardware engine structure
 * and initialize it
 * @dev: the device attached to one hardware engine
 * @rt: whether the request pump should run as a realtime task
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	init_kthread_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	init_kthread_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
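
/*
 * Usage sketch (illustrative only): typical driver setup at probe time.
 * The mydrv_* callbacks are hypothetical; with this API version the driver
 * fills in the engine's hooks directly after allocation, then starts it.
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	dd->engine->prepare_request = mydrv_prepare_req;
 *	dd->engine->crypt_one_request = mydrv_crypt_req;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret)
 *		goto err_engine_exit;
 */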

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine to be freed
 *
 * Return: 0 on success, else the error from crypto_engine_stop().
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	flush_kthread_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");