1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add llcrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
/*
 * Derived from the upstream fs/crypto implementation:
 * Linux commit 219d54332a09 (tag v5.4).
 */
27 #include <linux/pagemap.h>
28 #include <linux/mempool.h>
29 #include <linux/module.h>
30 #include <linux/scatterlist.h>
31 #include <linux/ratelimit.h>
32 #include <linux/dcache.h>
33 #include <linux/namei.h>
34 #include <crypto/aes.h>
35 #include <crypto/skcipher.h>
36 #include "llcrypt_private.h"
/* Sizes of the preallocated pools below; tunable at module load (read-only). */
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444)
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444)
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
"Number of crypto contexts to preallocate");

/* Pool of bounce pages used to hold ciphertext during writeback. */
static mempool_t *llcrypt_bounce_page_pool = NULL;

/* Free-list of preallocated llcrypt_ctx objects, protected by the lock below. */
static LIST_HEAD(llcrypt_free_ctxs)
static DEFINE_SPINLOCK(llcrypt_ctx_lock)

/* Workqueue on which read-side decryption work items are executed. */
static struct workqueue_struct *llcrypt_read_workqueue;
static DEFINE_MUTEX(llcrypt_init_mutex)

/* Slab caches for decryption contexts and per-inode crypt info. */
static struct kmem_cache *llcrypt_ctx_cachep;
struct kmem_cache *llcrypt_info_cachep;
59 void llcrypt_enqueue_decrypt_work(struct work_struct *work)
61 queue_work(llcrypt_read_workqueue, work);
63 EXPORT_SYMBOL(llcrypt_enqueue_decrypt_work);
66 * llcrypt_release_ctx() - Release a decryption context
67 * @ctx: The decryption context to release.
69 * If the decryption context was allocated from the pre-allocated pool, return
70 * it to that pool. Else, free it.
72 void llcrypt_release_ctx(struct llcrypt_ctx *ctx)
76 if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
77 kmem_cache_free(llcrypt_ctx_cachep, ctx);
79 spin_lock_irqsave(&llcrypt_ctx_lock, flags);
80 list_add(&ctx->free_list, &llcrypt_free_ctxs);
81 spin_unlock_irqrestore(&llcrypt_ctx_lock, flags);
84 EXPORT_SYMBOL(llcrypt_release_ctx);
87 * llcrypt_get_ctx() - Get a decryption context
88 * @gfp_flags: The gfp flag for memory allocation
90 * Allocate and initialize a decryption context.
92 * Return: A new decryption context on success; an ERR_PTR() otherwise.
94 struct llcrypt_ctx *llcrypt_get_ctx(gfp_t gfp_flags)
96 struct llcrypt_ctx *ctx;
100 * First try getting a ctx from the free list so that we don't have to
101 * call into the slab allocator.
103 spin_lock_irqsave(&llcrypt_ctx_lock, flags);
104 ctx = list_first_entry_or_null(&llcrypt_free_ctxs,
105 struct llcrypt_ctx, free_list);
107 list_del(&ctx->free_list);
108 spin_unlock_irqrestore(&llcrypt_ctx_lock, flags);
110 ctx = kmem_cache_zalloc(llcrypt_ctx_cachep, gfp_flags);
112 return ERR_PTR(-ENOMEM);
113 ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
115 ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
119 EXPORT_SYMBOL(llcrypt_get_ctx);
121 struct page *llcrypt_alloc_bounce_page(gfp_t gfp_flags)
123 return mempool_alloc(llcrypt_bounce_page_pool, gfp_flags);
127 * llcrypt_free_bounce_page() - free a ciphertext bounce page
129 * Free a bounce page that was allocated by llcrypt_encrypt_pagecache_blocks(),
130 * or by llcrypt_alloc_bounce_page() directly.
132 void llcrypt_free_bounce_page(struct page *bounce_page)
136 set_page_private(bounce_page, (unsigned long)NULL);
137 ClearPagePrivate(bounce_page);
138 mempool_free(bounce_page, llcrypt_bounce_page_pool);
140 EXPORT_SYMBOL(llcrypt_free_bounce_page);
142 void llcrypt_generate_iv(union llcrypt_iv *iv, u64 lblk_num,
143 const struct llcrypt_info *ci)
145 memset(iv, 0, ci->ci_mode->ivsize);
146 iv->lblk_num = cpu_to_le64(lblk_num);
148 if (llcrypt_is_direct_key_policy(&ci->ci_policy))
149 memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
151 if (ci->ci_essiv_tfm != NULL)
152 crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
155 /* Encrypt or decrypt a single filesystem block of file contents */
156 int llcrypt_crypt_block(const struct inode *inode, llcrypt_direction_t rw,
157 u64 lblk_num, struct page *src_page,
158 struct page *dest_page, unsigned int len,
159 unsigned int offs, gfp_t gfp_flags)
162 struct skcipher_request *req = NULL;
163 DECLARE_CRYPTO_WAIT(wait);
164 struct scatterlist dst, src;
165 struct llcrypt_info *ci = llcrypt_info(inode);
166 struct crypto_skcipher *tfm = ci->ci_ctfm;
170 if (dest_page != src_page)
171 memcpy(page_address(dest_page), page_address(src_page),
176 if (WARN_ON_ONCE(len <= 0))
178 if (WARN_ON_ONCE(len % LL_CRYPTO_BLOCK_SIZE != 0))
181 llcrypt_generate_iv(&iv, lblk_num, ci);
183 req = skcipher_request_alloc(tfm, gfp_flags);
187 skcipher_request_set_callback(
188 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
189 crypto_req_done, &wait);
191 sg_init_table(&dst, 1);
192 sg_set_page(&dst, dest_page, len, offs);
193 sg_init_table(&src, 1);
194 sg_set_page(&src, src_page, len, offs);
195 skcipher_request_set_crypt(req, &src, &dst, len, &iv);
196 if (rw == FS_DECRYPT)
197 res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
199 res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
200 skcipher_request_free(req);
202 llcrypt_err(inode, "%scryption failed for block %llu: %d",
203 (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
210 * llcrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
211 * @page: The locked pagecache page containing the block(s) to encrypt
212 * @len: Total size of the block(s) to encrypt. Must be a nonzero
213 * multiple of the filesystem's block size.
214 * @offs: Byte offset within @page of the first block to encrypt. Must be
215 * a multiple of the filesystem's block size.
216 * @gfp_flags: Memory allocation flags
218 * A new bounce page is allocated, and the specified block(s) are encrypted into
219 * it. In the bounce page, the ciphertext block(s) will be located at the same
220 * offsets at which the plaintext block(s) were located in the source page; any
221 * other parts of the bounce page will be left uninitialized. However, normally
222 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
224 * This is for use by the filesystem's ->writepages() method.
226 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
228 struct page *llcrypt_encrypt_pagecache_blocks(struct page *page,
234 const struct inode *inode = page->mapping->host;
235 const unsigned int blockbits = inode->i_blkbits;
236 const unsigned int blocksize = 1 << blockbits;
237 struct page *ciphertext_page;
238 u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
243 if (WARN_ON_ONCE(!PageLocked(page)))
244 return ERR_PTR(-EINVAL);
246 if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
247 return ERR_PTR(-EINVAL);
249 ciphertext_page = llcrypt_alloc_bounce_page(gfp_flags);
250 if (!ciphertext_page)
251 return ERR_PTR(-ENOMEM);
253 for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
254 err = llcrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
255 page, ciphertext_page,
256 blocksize, i, gfp_flags);
258 llcrypt_free_bounce_page(ciphertext_page);
262 SetPagePrivate(ciphertext_page);
263 set_page_private(ciphertext_page, (unsigned long)page);
264 return ciphertext_page;
266 EXPORT_SYMBOL(llcrypt_encrypt_pagecache_blocks);
269 * llcrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
270 * @inode: The inode to which this block belongs
271 * @page: The page containing the block to encrypt
272 * @len: Size of block to encrypt. Doesn't need to be a multiple of the
273 * fs block size, but must be a multiple of LL_CRYPTO_BLOCK_SIZE.
274 * @offs: Byte offset within @page at which the block to encrypt begins
275 * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
276 * number of the block within the file
277 * @gfp_flags: Memory allocation flags
279 * Encrypt a possibly-compressed filesystem block that is located in an
280 * arbitrary page, not necessarily in the original pagecache page. The @inode
281 * and @lblk_num must be specified, as they can't be determined from @page.
283 * Return: 0 on success; -errno on failure
285 int llcrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
286 unsigned int len, unsigned int offs,
287 u64 lblk_num, gfp_t gfp_flags)
289 return llcrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
290 len, offs, gfp_flags);
292 EXPORT_SYMBOL(llcrypt_encrypt_block_inplace);
295 * llcrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
296 * @page: The locked pagecache page containing the block(s) to decrypt
297 * @len: Total size of the block(s) to decrypt. Must be a nonzero
298 * multiple of the filesystem's block size.
299 * @offs: Byte offset within @page of the first block to decrypt. Must be
300 * a multiple of the filesystem's block size.
302 * The specified block(s) are decrypted in-place within the pagecache page,
303 * which must still be locked and not uptodate. Normally, blocksize ==
304 * PAGE_SIZE and the whole page is decrypted at once.
306 * This is for use by the filesystem's ->readpages() method.
308 * Return: 0 on success; -errno on failure
310 int llcrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
313 const struct inode *inode = page->mapping->host;
314 const unsigned int blockbits = inode->i_blkbits;
315 const unsigned int blocksize = 1 << blockbits;
316 u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
321 if (WARN_ON_ONCE(!PageLocked(page)))
324 if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
327 for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
328 err = llcrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
329 page, blocksize, i, GFP_NOFS);
335 EXPORT_SYMBOL(llcrypt_decrypt_pagecache_blocks);
338 * llcrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
339 * @inode: The inode to which this block belongs
340 * @page: The page containing the block to decrypt
341 * @len: Size of block to decrypt. Doesn't need to be a multiple of the
342 * fs block size, but must be a multiple of LL_CRYPTO_BLOCK_SIZE.
343 * @offs: Byte offset within @page at which the block to decrypt begins
344 * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
345 * number of the block within the file
347 * Decrypt a possibly-compressed filesystem block that is located in an
348 * arbitrary page, not necessarily in the original pagecache page. The @inode
349 * and @lblk_num must be specified, as they can't be determined from @page.
351 * Return: 0 on success; -errno on failure
353 int llcrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
354 unsigned int len, unsigned int offs,
357 return llcrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
358 len, offs, GFP_NOFS);
360 EXPORT_SYMBOL(llcrypt_decrypt_block_inplace);
363 * Validate dentries in encrypted directories to make sure we aren't potentially
364 * caching stale dentries after a key has been added.
366 static int llcrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
373 * Plaintext names are always valid, since llcrypt doesn't support
374 * reverting to ciphertext names without evicting the directory's inode
375 * -- which implies eviction of the dentries in the directory.
377 if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
381 * Ciphertext name; valid if the directory's key is still unavailable.
383 * Although llcrypt forbids rename() on ciphertext names, we still must
384 * use dget_parent() here rather than use ->d_parent directly. That's
385 * because a corrupted fs image may contain directory hard links, which
386 * the VFS handles by moving the directory's dentry tree in the dcache
387 * each time ->lookup() finds the directory and it already has a dentry
388 * elsewhere. Thus ->d_parent can be changing, and we must safely grab
389 * a reference to some ->d_parent to prevent it from being freed.
392 if (flags & LOOKUP_RCU)
395 dir = dget_parent(dentry);
396 err = llcrypt_get_encryption_info(d_inode(dir));
397 valid = !llcrypt_has_encryption_key(d_inode(dir));
406 const struct dentry_operations llcrypt_d_ops = {
407 .d_revalidate = llcrypt_d_revalidate,
410 static void llcrypt_destroy(void)
412 struct llcrypt_ctx *pos, *n;
414 list_for_each_entry_safe(pos, n, &llcrypt_free_ctxs, free_list)
415 kmem_cache_free(llcrypt_ctx_cachep, pos);
416 INIT_LIST_HEAD(&llcrypt_free_ctxs);
417 mempool_destroy(llcrypt_bounce_page_pool);
418 llcrypt_bounce_page_pool = NULL;
422 * llcrypt_initialize() - allocate major buffers for fs encryption.
423 * @cop_flags: llcrypt operations flags
425 * We only call this when we start accessing encrypted files, since it
426 * results in memory getting allocated that wouldn't otherwise be used.
428 * Return: Zero on success, non-zero otherwise.
430 int llcrypt_initialize(unsigned int cop_flags)
432 int i, res = -ENOMEM;
434 /* No need to allocate a bounce page pool if this FS won't use it. */
435 if (cop_flags & LL_CFLG_OWN_PAGES)
438 mutex_lock(&llcrypt_init_mutex);
439 if (llcrypt_bounce_page_pool)
440 goto already_initialized;
442 for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
443 struct llcrypt_ctx *ctx;
445 ctx = kmem_cache_zalloc(llcrypt_ctx_cachep, GFP_NOFS);
448 list_add(&ctx->free_list, &llcrypt_free_ctxs);
451 llcrypt_bounce_page_pool =
452 mempool_create_page_pool(num_prealloc_crypto_pages, 0);
453 if (!llcrypt_bounce_page_pool)
457 mutex_unlock(&llcrypt_init_mutex);
461 mutex_unlock(&llcrypt_init_mutex);
465 void llcrypt_msg(const struct inode *inode, int mask,
466 const char *fmt, ...)
468 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
469 DEFAULT_RATELIMIT_BURST);
470 struct va_format vaf;
473 if (!__ratelimit(&rs))
480 CDEBUG(mask, "llcrypt (%s, inode %lu): %pV\n",
481 inode->i_sb->s_id, inode->i_ino, &vaf);
483 CDEBUG(mask, "llcrypt: %pV\n", &vaf);
488 * llcrypt_init() - Set up for fs encryption.
490 int __init llcrypt_init(void)
495 * Use an unbound workqueue to allow bios to be decrypted in parallel
496 * even when they happen to complete on the same CPU. This sacrifices
497 * locality, but it's worthwhile since decryption is CPU-intensive.
499 * Also use a high-priority workqueue to prioritize decryption work,
500 * which blocks reads from completing, over regular application tasks.
502 llcrypt_read_workqueue = alloc_workqueue("llcrypt_read_queue",
503 WQ_UNBOUND | WQ_HIGHPRI,
505 if (!llcrypt_read_workqueue)
508 llcrypt_ctx_cachep = KMEM_CACHE(llcrypt_ctx, SLAB_RECLAIM_ACCOUNT);
509 if (!llcrypt_ctx_cachep)
510 goto fail_free_queue;
512 llcrypt_info_cachep = KMEM_CACHE(llcrypt_info, SLAB_RECLAIM_ACCOUNT);
513 if (!llcrypt_info_cachep)
516 err = llcrypt_init_keyring();
523 kmem_cache_destroy(llcrypt_info_cachep);
525 kmem_cache_destroy(llcrypt_ctx_cachep);
527 destroy_workqueue(llcrypt_read_workqueue);
533 * llcrypt_exit() - Clean up for fs encryption.
535 void __exit llcrypt_exit(void)
537 llcrypt_exit_keyring();
541 * Make sure all delayed rcu free inodes are flushed before we
546 kmem_cache_destroy(llcrypt_info_cachep);
547 kmem_cache_destroy(llcrypt_ctx_cachep);
548 destroy_workqueue(llcrypt_read_workqueue);