// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add llcrypt_pullback_bio_page()
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
/*
 * Linux commit 219d54332a09
 * tags/v5.4
 */
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "llcrypt_private.h"

#include <crypto/internal/cipher.h>

MODULE_IMPORT_NS(CRYPTO_INTERNAL);
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");
static mempool_t *llcrypt_bounce_page_pool = NULL;

static LIST_HEAD(llcrypt_free_ctxs);
static DEFINE_SPINLOCK(llcrypt_ctx_lock);

static struct workqueue_struct *llcrypt_read_workqueue;
static DEFINE_MUTEX(llcrypt_init_mutex);

static struct kmem_cache *llcrypt_ctx_cachep;
struct kmem_cache *llcrypt_info_cachep;
void llcrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(llcrypt_read_workqueue, work);
}
EXPORT_SYMBOL(llcrypt_enqueue_decrypt_work);
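/*
 * Example: a hypothetical caller's read-completion path. The "myfs_" names
 * and the private context struct are illustrative, not part of this API;
 * the only requirement is an embedded work_struct whose handler performs
 * the decryption (e.g. via llcrypt_decrypt_pagecache_blocks()):
 *
 *	struct myfs_read_ctx {
 *		struct bio *bio;
 *		struct work_struct work;
 *	};
 *
 *	static void myfs_read_end_io(struct bio *bio)
 *	{
 *		struct myfs_read_ctx *ctx = bio->bi_private;
 *
 *		INIT_WORK(&ctx->work, myfs_decrypt_work);
 *		llcrypt_enqueue_decrypt_work(&ctx->work);
 *	}
 */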
/**
 * llcrypt_release_ctx() - Release a decryption context
 * @ctx: The decryption context to release.
 *
 * If the decryption context was allocated from the pre-allocated pool, return
 * it to that pool. Else, free it.
 */
void llcrypt_release_ctx(struct llcrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(llcrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&llcrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &llcrypt_free_ctxs);
		spin_unlock_irqrestore(&llcrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(llcrypt_release_ctx);
/**
 * llcrypt_get_ctx() - Get a decryption context
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocate and initialize a decryption context.
 *
 * Return: A new decryption context on success; an ERR_PTR() otherwise.
 */
struct llcrypt_ctx *llcrypt_get_ctx(gfp_t gfp_flags)
{
	struct llcrypt_ctx *ctx;
	unsigned long flags;

	/*
	 * First try getting a ctx from the free list so that we don't have to
	 * call into the slab allocator.
	 */
	spin_lock_irqsave(&llcrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&llcrypt_free_ctxs,
				       struct llcrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&llcrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(llcrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	return ctx;
}
EXPORT_SYMBOL(llcrypt_get_ctx);
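/*
 * Example of the get/release pairing (a minimal sketch; what the ctx is
 * used for, e.g. tracking a bio's decryption state, is up to the caller):
 *
 *	struct llcrypt_ctx *ctx = llcrypt_get_ctx(GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... use ctx ...
 *	llcrypt_release_ctx(ctx);
 */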
struct page *llcrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(llcrypt_bounce_page_pool, gfp_flags);
}
/**
 * llcrypt_free_bounce_page() - free a ciphertext bounce page
 *
 * Free a bounce page that was allocated by llcrypt_encrypt_pagecache_blocks(),
 * or by llcrypt_alloc_bounce_page() directly.
 */
void llcrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, llcrypt_bounce_page_pool);
}
EXPORT_SYMBOL(llcrypt_free_bounce_page);
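/*
 * Example: freeing the bounce page once writeback completes (a hypothetical
 * "myfs_" completion handler; llcrypt stores the pagecache page in the
 * bounce page's ->private, see llcrypt_encrypt_pagecache_blocks() below):
 *
 *	static void myfs_write_end_io(struct bio *bio)
 *	{
 *		struct page *bounce_page = bio_first_page_all(bio);
 *		struct page *pagecache_page =
 *			(struct page *)page_private(bounce_page);
 *
 *		llcrypt_free_bounce_page(bounce_page);
 *		end_page_writeback(pagecache_page);
 *	}
 */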
void llcrypt_generate_iv(union llcrypt_iv *iv, u64 lblk_num,
			 const struct llcrypt_info *ci)
{
	memset(iv, 0, ci->ci_mode->ivsize);
	iv->lblk_num = cpu_to_le64(lblk_num);

	if (llcrypt_is_direct_key_policy(&ci->ci_policy))
		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	if (ci->ci_essiv_tfm != NULL)
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
}
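/*
 * Worked example of the IV layout (assuming union llcrypt_iv matches the
 * upstream fscrypt layout this file derives from, i.e. a __le64 lblk_num
 * followed by the nonce bytes): block 5 of a file without DIRECT_KEY yields
 * an IV of 05 00 00 00 00 00 00 00 followed by zeroes; with DIRECT_KEY the
 * per-file nonce is copied in after lblk_num; and with ESSIV the whole
 * iv->raw is then encrypted once more with ci_essiv_tfm.
 */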
/* Encrypt or decrypt a single filesystem block of file contents */
int llcrypt_crypt_block(const struct inode *inode, llcrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union llcrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct llcrypt_info *ci = llcrypt_info(inode);
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	/* Null encryption mode: no tfm, so just copy the data if needed. */
	if (tfm == NULL) {
		if (dest_page != src_page)
			memcpy(page_address(dest_page), page_address(src_page),
			       PAGE_SIZE);
		return 0;
	}

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % LL_CRYPTO_BLOCK_SIZE != 0))
		return -EINVAL;

	llcrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		llcrypt_err(inode, "%scryption failed for block %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
		return res;
	}
	return 0;
}
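/*
 * Example: encrypting a single fs block directly (a minimal sketch; "blk",
 * "page" and "blocksize" are caller-provided, and len/offs must satisfy the
 * checks above):
 *
 *	err = llcrypt_crypt_block(inode, FS_ENCRYPT, blk, page, page,
 *				  blocksize, 0, GFP_NOFS);
 *
 * Most filesystems use the llcrypt_{en,de}crypt_* helpers below instead,
 * which compute lblk_num and loop over the blocks of a page.
 */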
/**
 * llcrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
 * @page:      The locked pagecache page containing the block(s) to encrypt
 * @len:       Total size of the block(s) to encrypt. Must be a nonzero
 *	       multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to encrypt. Must be
 *	       a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags
 *
 * A new bounce page is allocated, and the specified block(s) are encrypted into
 * it. In the bounce page, the ciphertext block(s) will be located at the same
 * offsets at which the plaintext block(s) were located in the source page; any
 * other parts of the bounce page will be left uninitialized. However, normally
 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *llcrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = llcrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = llcrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
		if (err) {
			llcrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(llcrypt_encrypt_pagecache_blocks);
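/*
 * Example: a hypothetical ->writepage() that writes through a bounce page
 * ("myfs_" names including myfs_submit_page() are illustrative; real
 * callers also handle writeback state, partial pages and I/O submission
 * details):
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		struct page *bounce_page;
 *
 *		bounce_page = llcrypt_encrypt_pagecache_blocks(page, PAGE_SIZE,
 *							       0, GFP_NOFS);
 *		if (IS_ERR(bounce_page))
 *			return PTR_ERR(bounce_page);
 *		return myfs_submit_page(bounce_page, wbc);
 *	}
 *
 * On I/O completion the caller retrieves the pagecache page from
 * page_private(bounce_page) and frees the bounce page with
 * llcrypt_free_bounce_page().
 */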
/**
 * llcrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt. Doesn't need to be a multiple of the
 *	       fs block size, but must be a multiple of LL_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *	       number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page. The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int llcrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	return llcrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(llcrypt_encrypt_block_inplace);
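/*
 * Example: encrypting a compressed block that lives in a scratch page (a
 * minimal sketch; "scratch_page", "clen" and the compression step are
 * hypothetical, and clen is assumed already padded to a multiple of
 * LL_CRYPTO_BLOCK_SIZE):
 *
 *	err = llcrypt_encrypt_block_inplace(inode, scratch_page, clen, 0,
 *					    lblk_num, GFP_NOFS);
 */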
/**
 * llcrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
 * @page:      The locked pagecache page containing the block(s) to decrypt
 * @len:       Total size of the block(s) to decrypt. Must be a nonzero
 *	       multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to decrypt. Must be
 *	       a multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache page,
 * which must still be locked and not uptodate. Normally, blocksize ==
 * PAGE_SIZE and the whole page is decrypted at once.
 *
 * This is for use by the filesystem's ->readpages() method.
 *
 * Return: 0 on success; -errno on failure
 */
int llcrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
				     unsigned int offs)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = llcrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i, GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(llcrypt_decrypt_pagecache_blocks);
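/*
 * Example: the work item queued via llcrypt_enqueue_decrypt_work() would
 * typically decrypt each page of a completed read bio (a sketch; the
 * "myfs_read_ctx" struct and surrounding bio bookkeeping are illustrative,
 * matching the hypothetical read-completion example earlier in this file):
 *
 *	static void myfs_decrypt_work(struct work_struct *work)
 *	{
 *		struct myfs_read_ctx *ctx =
 *			container_of(work, struct myfs_read_ctx, work);
 *		struct bio_vec *bv;
 *		struct bvec_iter_all iter_all;
 *
 *		bio_for_each_segment_all(bv, ctx->bio, iter_all) {
 *			if (llcrypt_decrypt_pagecache_blocks(bv->bv_page,
 *							     PAGE_SIZE, 0))
 *				SetPageError(bv->bv_page);
 *		}
 *	}
 */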
/**
 * llcrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to decrypt
 * @len:       Size of block to decrypt. Doesn't need to be a multiple of the
 *	       fs block size, but must be a multiple of LL_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to decrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *	       number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page. The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int llcrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return llcrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(llcrypt_decrypt_block_inplace);
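/*
 * Example (the decryption mirror of the in-place encryption sketch above;
 * names are again hypothetical):
 *
 *	err = llcrypt_decrypt_block_inplace(inode, scratch_page, clen, 0,
 *					    lblk_num);
 */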
/*
 * Validate dentries in encrypted directories to make sure we aren't potentially
 * caching stale dentries after a key has been added.
 */
static int llcrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int err;
	int valid;

	/*
	 * Plaintext names are always valid, since llcrypt doesn't support
	 * reverting to ciphertext names without evicting the directory's inode
	 * -- which implies eviction of the dentries in the directory.
	 */
	if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
		return 1;

	/*
	 * Ciphertext name; valid if the directory's key is still unavailable.
	 *
	 * Although llcrypt forbids rename() on ciphertext names, we still must
	 * use dget_parent() here rather than use ->d_parent directly. That's
	 * because a corrupted fs image may contain directory hard links, which
	 * the VFS handles by moving the directory's dentry tree in the dcache
	 * each time ->lookup() finds the directory and it already has a dentry
	 * elsewhere. Thus ->d_parent can be changing, and we must safely grab
	 * a reference to some ->d_parent to prevent it from being freed.
	 */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	err = llcrypt_get_encryption_info(d_inode(dir));
	valid = !llcrypt_has_encryption_key(d_inode(dir));
	dput(dir);

	if (err < 0)
		return err;

	return valid;
}

const struct dentry_operations llcrypt_d_ops = {
	.d_revalidate = llcrypt_d_revalidate,
};
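/*
 * Example: a filesystem installs these dentry_operations when it creates a
 * no-key ("ciphertext") dentry during lookup (a sketch; in upstream fscrypt
 * the equivalent wiring is done by the fscrypt_prepare_lookup() helper):
 *
 *	err = llcrypt_get_encryption_info(dir);
 *	if (!err && !llcrypt_has_encryption_key(dir)) {
 *		spin_lock(&dentry->d_lock);
 *		dentry->d_flags |= DCACHE_ENCRYPTED_NAME;
 *		spin_unlock(&dentry->d_lock);
 *		d_set_d_op(dentry, &llcrypt_d_ops);
 *	}
 */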
static void llcrypt_destroy(void)
{
	struct llcrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &llcrypt_free_ctxs, free_list)
		kmem_cache_free(llcrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&llcrypt_free_ctxs);
	mempool_destroy(llcrypt_bounce_page_pool);
	llcrypt_bounce_page_pool = NULL;
}
/**
 * llcrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: llcrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int llcrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & LL_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&llcrypt_init_mutex);
	if (llcrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct llcrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(llcrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &llcrypt_free_ctxs);
	}

	llcrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!llcrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&llcrypt_init_mutex);
	return 0;
fail:
	llcrypt_destroy();
	mutex_unlock(&llcrypt_init_mutex);
	return res;
}
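/*
 * Example: a filesystem calls llcrypt_initialize() before its first use of
 * the bounce page pool, typically while setting up a file's encryption info
 * (a sketch assuming fscrypt-style plumbing where the registered operations
 * struct is reachable from the superblock as "s_cop"; llcrypt's actual hook
 * point may differ):
 *
 *	err = llcrypt_initialize(inode->i_sb->s_cop->flags);
 *	if (err)
 *		return err;
 */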
void llcrypt_msg(const struct inode *inode, int mask,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode)
		CDEBUG(mask, "llcrypt (%s, inode %lu): %pV\n",
		       inode->i_sb->s_id, inode->i_ino, &vaf);
	else
		CDEBUG(mask, "llcrypt: %pV\n", &vaf);
	va_end(args);
}
/**
 * llcrypt_init() - Set up for fs encryption.
 */
int __init llcrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU. This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	llcrypt_read_workqueue = alloc_workqueue("llcrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!llcrypt_read_workqueue)
		goto fail;

	llcrypt_ctx_cachep = KMEM_CACHE(llcrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!llcrypt_ctx_cachep)
		goto fail_free_queue;

	llcrypt_info_cachep = KMEM_CACHE(llcrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!llcrypt_info_cachep)
		goto fail_free_ctx;

	err = llcrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(llcrypt_info_cachep);
fail_free_ctx:
	kmem_cache_destroy(llcrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(llcrypt_read_workqueue);
fail:
	return err;
}
/**
 * llcrypt_exit() - Clean up for fs encryption.
 */
void __exit llcrypt_exit(void)
{
	llcrypt_exit_keyring();

	llcrypt_destroy();
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(llcrypt_info_cachep);
	kmem_cache_destroy(llcrypt_ctx_cachep);
	destroy_workqueue(llcrypt_read_workqueue);
}