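// SPDX-License-Identifier: GPL-2.0
/*
 * Core block encryption/decryption routines for fscrypt, the kernel's
 * filesystem-level encryption framework.
 */
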
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

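/* Queue a decryption work item on the fscrypt read workqueue. */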
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);

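/**
 * fscrypt_release_ctx() - Release a decryption context
 * @ctx: The decryption context to release.
 *
 * If the decryption context was allocated from the pre-allocated pool, return
 * it to that pool.  Else, free it.
 */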
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

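/**
 * fscrypt_get_ctx() - Get a decryption context
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocate and initialize a decryption context.
 *
 * Return: A new decryption context on success; an ERR_PTR() otherwise.
 */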
struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	unsigned long flags;

	/*
	 * First try taking a ctx from the free list, so that in the common
	 * case we don't need to call into the slab allocator at all.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
				       struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

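/* Allocate a ciphertext bounce page from the bounce page mempool. */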
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

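/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_alloc_bounce_page(), or by
 * fscrypt_encrypt_pagecache_blocks().  The page's private state is cleared
 * before the page is returned to the mempool.
 */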
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);

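/*
 * Generate the IV for encrypting or decrypting logical block @lblk_num of the
 * file.  With FS_POLICY_FLAG_DIRECT_KEY the per-file nonce is included in the
 * IV, and with ESSIV the IV is additionally encrypted with the ESSIV cipher.
 */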
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	memset(iv, 0, ci->ci_mode->ivsize);
	iv->lblk_num = cpu_to_le64(lblk_num);

	if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY)
		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	if (ci->ci_essiv_tfm != NULL)
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
}

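/* Encrypt or decrypt a single filesystem block of file contents. */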
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}

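/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
 * @page:      The locked pagecache page containing the data to encrypt
 * @len:       Size of the data to encrypt.  Must be a nonzero multiple of the
 *             filesystem's block size.
 * @offs:      Byte offset within @page of the first block to encrypt.  Must be
 *             a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags
 *
 * A new bounce page is allocated, and the specified block(s) are encrypted
 * into it.  In the bounce page, the ciphertext block(s) will be located at the
 * same offsets at which the plaintext block(s) were located in the source
 * page.  The bounce page's page_private is set to point back to the source
 * page, so that callers and fscrypt_free_bounce_page() can find it again.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */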
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);

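/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of the block to encrypt.  Must be a multiple of
 *             FS_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *             number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a block that may be located in an arbitrary page, not necessarily
 * in the original pagecache page.  The @inode and @lblk_num must be
 * specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */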
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);

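/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt data in a pagecache page
 * @page: The locked pagecache page containing the data to decrypt
 * @len:  Size of the data to decrypt.  Must be a nonzero multiple of the
 *        filesystem's block size.
 * @offs: Byte offset within @page of the first block to decrypt.  Must be a
 *        multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache page,
 * which must still be locked.
 *
 * Return: 0 on success; -errno on failure
 */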
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
				     unsigned int offs)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i, GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);

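/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:    The inode to which this block belongs
 * @page:     The page containing the block to decrypt
 * @len:      Size of the block to decrypt.  Must be a multiple of
 *            FS_CRYPTO_BLOCK_SIZE.
 * @offs:     Byte offset within @page at which the block to decrypt begins
 * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
 *            number of the block within the file
 *
 * Decrypt a block that may be located in an arbitrary page, not necessarily
 * in the original pagecache page.  The @inode and @lblk_num must be
 * specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */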
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);

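/*
 * Validate dentries in encrypted directories to make sure we aren't
 * potentially caching stale dentries after a key has been added.
 */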
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int err;
	int valid;

	/*
	 * Plaintext names are always valid, since fscrypt doesn't support
	 * reverting to ciphertext names without evicting the directory's
	 * inode -- which implies eviction of the dentries in the directory.
	 */
	if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
		return 1;

	/*
	 * Ciphertext name; the dentry is valid only as long as the directory's
	 * key remains unavailable.  Checking that requires taking a reference
	 * on the parent directory's inode, which can't be done in RCU-walk
	 * mode.
	 */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	err = fscrypt_get_encryption_info(d_inode(dir));
	valid = !fscrypt_has_encryption_key(d_inode(dir));
	dput(dir);

	if (err < 0)
		return err;

	return valid;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};

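/* Free the pre-allocated crypto contexts and the bounce page pool. */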
static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

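/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */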
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

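/*
 * Log a ratelimited fscrypt diagnostic message, prefixed with the
 * superblock's identifier (s_id) when one is available.
 */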
void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}

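/**
 * fscrypt_init() - Set up for fs encryption.
 */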
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue so that work items may run on any CPU, and
	 * a high-priority one so that decryption, which blocks reads from
	 * completing, is prioritized over regular work.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

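/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */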
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");