1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/pagemap.h>
23#include <linux/mempool.h>
24#include <linux/module.h>
25#include <linux/scatterlist.h>
26#include <linux/ratelimit.h>
27#include <linux/dcache.h>
28#include <linux/namei.h>
29#include <crypto/aes.h>
30#include <crypto/skcipher.h>
31#include "fscrypt_private.h"
32
/* How many bounce pages to keep in the preallocated mempool. */
static unsigned int num_prealloc_crypto_pages = 32;
/* How many fscrypt_ctx objects to preallocate onto the free list. */
static unsigned int num_prealloc_crypto_ctxs = 128;

/* Both tunables are exposed as read-only (0444) module parameters. */
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

/* Mempool backing the temporary ciphertext ("bounce") pages. */
static mempool_t *fscrypt_bounce_page_pool = NULL;

/* Free list of preallocated fscrypt_ctx objects; fscrypt_ctx_lock guards it. */
static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

/* Workqueue that runs the work items queued by fscrypt_enqueue_decrypt_work(). */
static struct workqueue_struct *fscrypt_read_workqueue;
/* Serializes the lazy setup done in fscrypt_initialize(). */
static DEFINE_MUTEX(fscrypt_init_mutex);

/* Slab cache for fscrypt_ctx; fscrypt_info_cachep is shared with other files. */
static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
53
/**
 * fscrypt_enqueue_decrypt_work() - enqueue a work item on the fscrypt
 *				    read workqueue
 * @work: the work item to enqueue
 */
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
59
60
61
62
63
64
65
66
67
68
69void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
70{
71 unsigned long flags;
72
73 if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
74 mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
75 ctx->w.bounce_page = NULL;
76 }
77 ctx->w.control_page = NULL;
78 if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
79 kmem_cache_free(fscrypt_ctx_cachep, ctx);
80 } else {
81 spin_lock_irqsave(&fscrypt_ctx_lock, flags);
82 list_add(&ctx->free_list, &fscrypt_free_ctxs);
83 spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
84 }
85}
86EXPORT_SYMBOL(fscrypt_release_ctx);
87
88
89
90
91
92
93
94
95
96
97
98struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
99{
100 struct fscrypt_ctx *ctx = NULL;
101 struct fscrypt_info *ci = inode->i_crypt_info;
102 unsigned long flags;
103
104 if (ci == NULL)
105 return ERR_PTR(-ENOKEY);
106
107
108
109
110
111
112
113
114
115
116
117 spin_lock_irqsave(&fscrypt_ctx_lock, flags);
118 ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
119 struct fscrypt_ctx, free_list);
120 if (ctx)
121 list_del(&ctx->free_list);
122 spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
123 if (!ctx) {
124 ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
125 if (!ctx)
126 return ERR_PTR(-ENOMEM);
127 ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
128 } else {
129 ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
130 }
131 ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
132 return ctx;
133}
134EXPORT_SYMBOL(fscrypt_get_ctx);
135
/**
 * fscrypt_do_page_crypto() - en/decrypt one contiguous region of a page
 * @inode:     inode the data belongs to; its i_crypt_info supplies the tfm
 * @rw:        FS_ENCRYPT or FS_DECRYPT
 * @lblk_num:  logical block number; becomes the IV
 * @src_page:  page holding the input data
 * @dest_page: page receiving the output data (may equal @src_page)
 * @len:       number of bytes to process (must be nonzero)
 * @offs:      byte offset of the region within both pages
 * @gfp_flags: allocation flags for the skcipher request
 *
 * Runs the skcipher synchronously via crypto_wait_req().
 *
 * Return: 0 on success, or a negative errno from request allocation or
 * the crypto operation.
 */
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	/* IV = little-endian block number, zero-padded out to FS_IV_SIZE. */
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	/* The IV struct must exactly fill one AES block. */
	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	if (ci->ci_essiv_tfm != NULL) {
		/* ESSIV: encrypt the block-number IV before using it. */
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}
191
192struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
193 gfp_t gfp_flags)
194{
195 ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
196 if (ctx->w.bounce_page == NULL)
197 return ERR_PTR(-ENOMEM);
198 ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
199 return ctx->w.bounce_page;
200}
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
/**
 * fscrypt_encrypt_page() - encrypt a range of one page of data
 * @inode:     the inode the data belongs to
 * @page:      the page containing the plaintext
 * @len:       number of bytes to encrypt; must be a multiple of
 *             FS_CRYPTO_BLOCK_SIZE
 * @offs:      byte offset of the range within the page
 * @lblk_num:  logical block number, used to derive the IV
 * @gfp_flags: allocation flags for the context/bounce page/request
 *
 * If the filesystem set FS_CFLG_OWN_PAGES, the data is encrypted in
 * place in @page and @page itself is returned.  Otherwise a bounce page
 * is allocated, the ciphertext is written there, the crypto context is
 * stashed in the bounce page's page_private, and the locked bounce page
 * is returned; callers undo this via fscrypt_restore_control_page().
 *
 * Return: the ciphertext page, or an ERR_PTR on failure.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)

{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* In-place encryption: no context or bounce page needed. */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	/* Let fscrypt_restore_control_page() find the ctx later. */
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
303 unsigned int len, unsigned int offs, u64 lblk_num)
304{
305 if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
306 BUG_ON(!PageLocked(page));
307
308 return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
309 len, offs, GFP_NOFS);
310}
311EXPORT_SYMBOL(fscrypt_decrypt_page);
312
313
314
315
316
317
/*
 * fscrypt_d_revalidate() - revalidate a dentry in an encrypted directory.
 *
 * Decides whether a cached dentry is still usable based on whether the
 * parent directory's key was available when the dentry was cached
 * (DCACHE_ENCRYPTED_WITH_KEY) versus whether it is available now.
 * Returns 1 to keep the dentry, 0 to invalidate it, or -ECHILD to drop
 * out of RCU-walk mode.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	/* dget_parent() and d_lock below are not safe under RCU-walk. */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* d_flags is modified under d_lock; take a consistent snapshot. */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * Invalidate (return 0) when the cached and current key state
	 * disagree, or when a keyless negative dentry is found:
	 *  - keyless negative dentry: the name might now resolve, so force
	 *    a fresh lookup (presumably so ->lookup() can recheck — the
	 *    caller-side handling isn't visible here; TODO confirm);
	 *  - cached without key but key now present;
	 *  - cached with key but key since removed.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
358
359void fscrypt_restore_control_page(struct page *page)
360{
361 struct fscrypt_ctx *ctx;
362
363 ctx = (struct fscrypt_ctx *)page_private(page);
364 set_page_private(page, (unsigned long)NULL);
365 ClearPagePrivate(page);
366 unlock_page(page);
367 fscrypt_release_ctx(ctx);
368}
369EXPORT_SYMBOL(fscrypt_restore_control_page);
370
371static void fscrypt_destroy(void)
372{
373 struct fscrypt_ctx *pos, *n;
374
375 list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
376 kmem_cache_free(fscrypt_ctx_cachep, pos);
377 INIT_LIST_HEAD(&fscrypt_free_ctxs);
378 mempool_destroy(fscrypt_bounce_page_pool);
379 fscrypt_bounce_page_pool = NULL;
380}
381
382
383
384
385
386
387
388
389
390
391int fscrypt_initialize(unsigned int cop_flags)
392{
393 int i, res = -ENOMEM;
394
395
396 if (cop_flags & FS_CFLG_OWN_PAGES)
397 return 0;
398
399 mutex_lock(&fscrypt_init_mutex);
400 if (fscrypt_bounce_page_pool)
401 goto already_initialized;
402
403 for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
404 struct fscrypt_ctx *ctx;
405
406 ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
407 if (!ctx)
408 goto fail;
409 list_add(&ctx->free_list, &fscrypt_free_ctxs);
410 }
411
412 fscrypt_bounce_page_pool =
413 mempool_create_page_pool(num_prealloc_crypto_pages, 0);
414 if (!fscrypt_bounce_page_pool)
415 goto fail;
416
417already_initialized:
418 mutex_unlock(&fscrypt_init_mutex);
419 return 0;
420fail:
421 fscrypt_destroy();
422 mutex_unlock(&fscrypt_init_mutex);
423 return res;
424}
425
/*
 * fscrypt_msg() - ratelimited printk for fscrypt diagnostics.
 * @sb:    the superblock to name in the message, or NULL for none
 * @level: string prepended to the message via "%s" (presumably a
 *         KERN_* level prefix — confirm against callers)
 * @fmt:   printf-style format string, followed by its arguments
 *
 * Messages share one global ratelimit state, so bursts from any caller
 * can suppress output from all of them.
 */
void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	/* vaf must point at a live va_list when %pV is formatted. */
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
446
447
448
449
450static int __init fscrypt_init(void)
451{
452
453
454
455
456
457
458
459
460 fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
461 WQ_UNBOUND | WQ_HIGHPRI,
462 num_online_cpus());
463 if (!fscrypt_read_workqueue)
464 goto fail;
465
466 fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
467 if (!fscrypt_ctx_cachep)
468 goto fail_free_queue;
469
470 fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
471 if (!fscrypt_info_cachep)
472 goto fail_free_ctx;
473
474 return 0;
475
476fail_free_ctx:
477 kmem_cache_destroy(fscrypt_ctx_cachep);
478fail_free_queue:
479 destroy_workqueue(fscrypt_read_workqueue);
480fail:
481 return -ENOMEM;
482}
483module_init(fscrypt_init)
484
485
486
487
/*
 * fscrypt_exit() - module teardown: free all preallocated resources.
 */
static void __exit fscrypt_exit(void)
{
	/* Frees the ctx free list and the bounce page pool. */
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	/* Releases the ESSIV state set up elsewhere in fscrypt. */
	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);
500
501MODULE_LICENSE("GPL");
502