/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for AES ciphers with 128/192/256-bit keys in
 * CBC and ECB mode, and for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"

static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offsets into the input and output SGs */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

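	/* the Security System is shared, so serialize all register access */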
	spin_lock_irqsave(&ss->slock, flags);

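	/* write the key into the SS_KEYx registers, 32 bits at a time */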
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

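	/* walk the source and destination SGs with atomic mapping iterators */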
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}

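	/*
	 * this path is only taken when every SG length is a multiple of 4,
	 * so ileft/oleft can be counted in 32-bit words
	 */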
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
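	/*
	 * Poll loop: push input words into the RX FIFO while space is
	 * available, then drain the TX FIFO into the destination.
	 */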
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

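	/* copy the updated IV back to the request for CBC chaining */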
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

/* Generic function that supports SGs whose length is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offsets into the input and output SGs */
	char buf[4 * SS_RX_MAX];	/* buffer for linearizing the source SG */
	char bufo[4 * SS_TX_MAX];	/* buffer for linearizing the dest SG */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If all SGs have a length that is a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

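	/* map the SGs with atomic iterators: we hold a spinlock here */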
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

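	/*
	 * Main poll loop: feed the RX FIFO from the source SG, using buf to
	 * linearize input when an SG entry is not 4-byte aligned, and drain
	 * the TX FIFO into the destination SG, using bufo when needed.
	 */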
	while (oleft) {
		if (ileft) {
			/*
			 * todo is the number of consecutive 4-byte words
			 * we can read from the current input SG.
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes: linearize
				 * into buf; todo is in bytes here.
				 * After the copy, once buf holds a multiple
				 * of 4 bytes we must be able to push all of
				 * it in one pass, hence the min() with
				 * rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;

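		/*
		 * todo is the number of consecutive 4-byte words we can
		 * write directly into the current output SG.
		 */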
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read tx_cnt words into bufo, draining as much as
			 * the device currently offers.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than what remains in the SG entry,
				 * and no more than what remains in bufo;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully consumed here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_skcipher_get_flags(tfm);

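	/*
	 * des_ekey() returns 0 for a weak key; reject it when the user
	 * requested weak-key checking via CRYPTO_TFM_REQ_WEAK_KEY.
	 */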
	ret = des_ekey(tmp, key);
	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}