// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add llcrypt_pullback_bio_page()
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
/*
 * Linux commit 219d54332a09
 */
27 #include <linux/pagemap.h>
28 #include <linux/mempool.h>
29 #include <linux/module.h>
30 #include <linux/scatterlist.h>
31 #include <linux/ratelimit.h>
32 #include <linux/dcache.h>
33 #include <linux/namei.h>
34 #include <crypto/aes.h>
35 #include <crypto/skcipher.h>
36 #include "llcrypt_private.h"
39 #include <crypto/internal/cipher.h>
41 MODULE_IMPORT_NS(CRYPTO_INTERNAL);
44 static unsigned int num_prealloc_crypto_pages = 32;
45 static unsigned int num_prealloc_crypto_ctxs = 128;
47 module_param(num_prealloc_crypto_pages, uint, 0444);
48 MODULE_PARM_DESC(num_prealloc_crypto_pages,
49 "Number of crypto pages to preallocate");
50 module_param(num_prealloc_crypto_ctxs, uint, 0444);
51 MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
52 "Number of crypto contexts to preallocate");
54 static char *client_encryption_engine = "aes-ni";
55 module_param(client_encryption_engine, charp, 0444);
56 MODULE_PARM_DESC(client_encryption_engine, "Client encryption engine");
58 enum llcrypt_crypto_engine_type llcrypt_crypto_engine = LLCRYPT_ENGINE_AES_NI;
60 static mempool_t *llcrypt_bounce_page_pool = NULL;
62 static LIST_HEAD(llcrypt_free_ctxs);
63 static DEFINE_SPINLOCK(llcrypt_ctx_lock);
65 static struct workqueue_struct *llcrypt_read_workqueue;
66 static DEFINE_MUTEX(llcrypt_init_mutex);
68 static struct kmem_cache *llcrypt_ctx_cachep;
69 struct kmem_cache *llcrypt_info_cachep;
71 void llcrypt_enqueue_decrypt_work(struct work_struct *work)
73 queue_work(llcrypt_read_workqueue, work);
75 EXPORT_SYMBOL(llcrypt_enqueue_decrypt_work);
78 * llcrypt_release_ctx() - Release a decryption context
79 * @ctx: The decryption context to release.
81 * If the decryption context was allocated from the pre-allocated pool, return
82 * it to that pool. Else, free it.
84 void llcrypt_release_ctx(struct llcrypt_ctx *ctx)
88 if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
89 kmem_cache_free(llcrypt_ctx_cachep, ctx);
91 spin_lock_irqsave(&llcrypt_ctx_lock, flags);
92 list_add(&ctx->free_list, &llcrypt_free_ctxs);
93 spin_unlock_irqrestore(&llcrypt_ctx_lock, flags);
96 EXPORT_SYMBOL(llcrypt_release_ctx);
99 * llcrypt_get_ctx() - Get a decryption context
100 * @gfp_flags: The gfp flag for memory allocation
102 * Allocate and initialize a decryption context.
104 * Return: A new decryption context on success; an ERR_PTR() otherwise.
106 struct llcrypt_ctx *llcrypt_get_ctx(gfp_t gfp_flags)
108 struct llcrypt_ctx *ctx;
112 * First try getting a ctx from the free list so that we don't have to
113 * call into the slab allocator.
115 spin_lock_irqsave(&llcrypt_ctx_lock, flags);
116 ctx = list_first_entry_or_null(&llcrypt_free_ctxs,
117 struct llcrypt_ctx, free_list);
119 list_del(&ctx->free_list);
120 spin_unlock_irqrestore(&llcrypt_ctx_lock, flags);
122 ctx = kmem_cache_zalloc(llcrypt_ctx_cachep, gfp_flags);
124 return ERR_PTR(-ENOMEM);
125 ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
127 ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
131 EXPORT_SYMBOL(llcrypt_get_ctx);
133 struct page *llcrypt_alloc_bounce_page(gfp_t gfp_flags)
135 return mempool_alloc(llcrypt_bounce_page_pool, gfp_flags);
139 * llcrypt_free_bounce_page() - free a ciphertext bounce page
141 * Free a bounce page that was allocated by llcrypt_encrypt_pagecache_blocks(),
142 * or by llcrypt_alloc_bounce_page() directly.
144 void llcrypt_free_bounce_page(struct page *bounce_page)
148 set_page_private(bounce_page, (unsigned long)NULL);
149 ClearPagePrivate(bounce_page);
150 mempool_free(bounce_page, llcrypt_bounce_page_pool);
152 EXPORT_SYMBOL(llcrypt_free_bounce_page);
154 void llcrypt_generate_iv(union llcrypt_iv *iv, u64 lblk_num,
155 const struct llcrypt_info *ci)
157 memset(iv, 0, ci->ci_mode->ivsize);
158 iv->lblk_num = cpu_to_le64(lblk_num);
160 if (llcrypt_is_direct_key_policy(&ci->ci_policy))
161 memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
163 if (ci->ci_essiv_tfm != NULL)
164 crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
167 /* Encrypt or decrypt a single filesystem block of file contents */
168 int llcrypt_crypt_block(const struct inode *inode, llcrypt_direction_t rw,
169 u64 lblk_num, struct page *src_page,
170 struct page *dest_page, unsigned int len,
171 unsigned int offs, gfp_t gfp_flags)
174 struct skcipher_request *req = NULL;
175 DECLARE_CRYPTO_WAIT(wait);
176 struct scatterlist dst, src;
177 struct llcrypt_info *ci = llcrypt_info(inode);
178 struct crypto_skcipher *tfm = ci->ci_ctfm;
182 if (dest_page != src_page)
183 memcpy(page_address(dest_page), page_address(src_page),
188 if (WARN_ON_ONCE(len <= 0))
190 if (WARN_ON_ONCE(len % LL_CRYPTO_BLOCK_SIZE != 0))
193 llcrypt_generate_iv(&iv, lblk_num, ci);
195 req = skcipher_request_alloc(tfm, gfp_flags);
199 skcipher_request_set_callback(
200 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
201 crypto_req_done, &wait);
203 sg_init_table(&dst, 1);
204 sg_set_page(&dst, dest_page, len, offs);
205 sg_init_table(&src, 1);
206 sg_set_page(&src, src_page, len, offs);
207 skcipher_request_set_crypt(req, &src, &dst, len, &iv);
208 if (rw == FS_DECRYPT)
209 res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
211 res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
212 skcipher_request_free(req);
214 llcrypt_err(inode, "%scryption failed for block %llu: %d",
215 (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
222 * llcrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
223 * @page: The locked pagecache page containing the block(s) to encrypt
224 * @len: Total size of the block(s) to encrypt. Must be a nonzero
225 * multiple of the filesystem's block size.
226 * @offs: Byte offset within @page of the first block to encrypt. Must be
227 * a multiple of the filesystem's block size.
228 * @gfp_flags: Memory allocation flags
230 * A new bounce page is allocated, and the specified block(s) are encrypted into
231 * it. In the bounce page, the ciphertext block(s) will be located at the same
232 * offsets at which the plaintext block(s) were located in the source page; any
233 * other parts of the bounce page will be left uninitialized. However, normally
234 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
236 * This is for use by the filesystem's ->writepages() method.
238 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
240 struct page *llcrypt_encrypt_pagecache_blocks(struct page *page,
246 const struct inode *inode = page->mapping->host;
247 const unsigned int blockbits = inode->i_blkbits;
248 const unsigned int blocksize = 1 << blockbits;
249 struct page *ciphertext_page;
250 u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
255 if (WARN_ON_ONCE(!PageLocked(page)))
256 return ERR_PTR(-EINVAL);
258 if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
259 return ERR_PTR(-EINVAL);
261 ciphertext_page = llcrypt_alloc_bounce_page(gfp_flags);
262 if (!ciphertext_page)
263 return ERR_PTR(-ENOMEM);
265 for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
266 err = llcrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
267 page, ciphertext_page,
268 blocksize, i, gfp_flags);
270 llcrypt_free_bounce_page(ciphertext_page);
274 SetPagePrivate(ciphertext_page);
275 set_page_private(ciphertext_page, (unsigned long)page);
276 return ciphertext_page;
278 EXPORT_SYMBOL(llcrypt_encrypt_pagecache_blocks);
281 * llcrypt_encrypt_block() - Encrypt a filesystem block in a page
282 * @inode: The inode to which this block belongs
283 * @src: The page containing the block to encrypt
284 * @dst: The page which will contain the encrypted data
285 * @len: Size of block to encrypt. Doesn't need to be a multiple of the
286 * fs block size, but must be a multiple of LL_CRYPTO_BLOCK_SIZE.
287 * @offs: Byte offset within @page at which the block to encrypt begins
288 * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
289 * number of the block within the file
290 * @gfp_flags: Memory allocation flags
292 * Encrypt a possibly-compressed filesystem block that is located in an
293 * arbitrary page, not necessarily in the original pagecache page. The @inode
294 * and @lblk_num must be specified, as they can't be determined from @page.
295 * The decrypted data will be stored in @dst.
297 * Return: 0 on success; -errno on failure
299 int llcrypt_encrypt_block(const struct inode *inode, struct page *src,
300 struct page *dst, unsigned int len, unsigned int offs,
301 u64 lblk_num, gfp_t gfp_flags)
303 return llcrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, src, dst,
304 len, offs, gfp_flags);
306 EXPORT_SYMBOL(llcrypt_encrypt_block);
309 * llcrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
310 * @page: The locked pagecache page containing the block(s) to decrypt
311 * @len: Total size of the block(s) to decrypt. Must be a nonzero
312 * multiple of the filesystem's block size.
313 * @offs: Byte offset within @page of the first block to decrypt. Must be
314 * a multiple of the filesystem's block size.
316 * The specified block(s) are decrypted in-place within the pagecache page,
317 * which must still be locked and not uptodate. Normally, blocksize ==
318 * PAGE_SIZE and the whole page is decrypted at once.
320 * This is for use by the filesystem's ->readpages() method.
322 * Return: 0 on success; -errno on failure
324 int llcrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
327 const struct inode *inode = page->mapping->host;
328 const unsigned int blockbits = inode->i_blkbits;
329 const unsigned int blocksize = 1 << blockbits;
330 u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
335 if (WARN_ON_ONCE(!PageLocked(page)))
338 if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
341 for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
342 err = llcrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
343 page, blocksize, i, GFP_NOFS);
349 EXPORT_SYMBOL(llcrypt_decrypt_pagecache_blocks);
352 * llcrypt_decrypt_block() - Cache a decrypted filesystem block in a page
353 * @inode: The inode to which this block belongs
354 * @src: The page containing the block to decrypt
355 * @dst: The page which will contain the plain data
356 * @len: Size of block to decrypt. Doesn't need to be a multiple of the
357 * fs block size, but must be a multiple of LL_CRYPTO_BLOCK_SIZE.
358 * @offs: Byte offset within @page at which the block to decrypt begins
359 * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
360 * number of the block within the file
362 * Decrypt a possibly-compressed filesystem block that is located in an
363 * arbitrary page, not necessarily in the original pagecache page. The @inode
364 * and @lblk_num must be specified, as they can't be determined from @page.
365 * The encrypted data will be stored in @dst.
367 * Return: 0 on success; -errno on failure
369 int llcrypt_decrypt_block(const struct inode *inode, struct page *src,
370 struct page *dst, unsigned int len, unsigned int offs,
371 u64 lblk_num, gfp_t gfp_flags)
373 return llcrypt_crypt_block(inode, FS_DECRYPT, lblk_num, src, dst,
374 len, offs, gfp_flags);
376 EXPORT_SYMBOL(llcrypt_decrypt_block);
379 * Validate dentries in encrypted directories to make sure we aren't potentially
380 * caching stale dentries after a key has been added.
382 static int llcrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
389 * Plaintext names are always valid, since llcrypt doesn't support
390 * reverting to ciphertext names without evicting the directory's inode
391 * -- which implies eviction of the dentries in the directory.
393 if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
397 * Ciphertext name; valid if the directory's key is still unavailable.
399 * Although llcrypt forbids rename() on ciphertext names, we still must
400 * use dget_parent() here rather than use ->d_parent directly. That's
401 * because a corrupted fs image may contain directory hard links, which
402 * the VFS handles by moving the directory's dentry tree in the dcache
403 * each time ->lookup() finds the directory and it already has a dentry
404 * elsewhere. Thus ->d_parent can be changing, and we must safely grab
405 * a reference to some ->d_parent to prevent it from being freed.
408 if (flags & LOOKUP_RCU)
411 dir = dget_parent(dentry);
412 err = llcrypt_get_encryption_info(d_inode(dir));
413 valid = !llcrypt_has_encryption_key(d_inode(dir));
422 const struct dentry_operations llcrypt_d_ops = {
423 .d_revalidate = llcrypt_d_revalidate,
426 static void llcrypt_destroy(void)
428 struct llcrypt_ctx *pos, *n;
430 list_for_each_entry_safe(pos, n, &llcrypt_free_ctxs, free_list)
431 kmem_cache_free(llcrypt_ctx_cachep, pos);
432 INIT_LIST_HEAD(&llcrypt_free_ctxs);
433 mempool_destroy(llcrypt_bounce_page_pool);
434 llcrypt_bounce_page_pool = NULL;
438 * llcrypt_initialize() - allocate major buffers for fs encryption.
439 * @cop_flags: llcrypt operations flags
441 * We only call this when we start accessing encrypted files, since it
442 * results in memory getting allocated that wouldn't otherwise be used.
444 * Return: Zero on success, non-zero otherwise.
446 int llcrypt_initialize(unsigned int cop_flags)
448 int i, res = -ENOMEM;
450 /* No need to allocate a bounce page pool if this FS won't use it. */
451 if (cop_flags & LL_CFLG_OWN_PAGES)
454 mutex_lock(&llcrypt_init_mutex);
455 if (llcrypt_bounce_page_pool)
456 goto already_initialized;
458 for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
459 struct llcrypt_ctx *ctx;
461 ctx = kmem_cache_zalloc(llcrypt_ctx_cachep, GFP_NOFS);
464 list_add(&ctx->free_list, &llcrypt_free_ctxs);
467 llcrypt_bounce_page_pool =
468 mempool_create_page_pool(num_prealloc_crypto_pages, 0);
469 if (!llcrypt_bounce_page_pool)
473 mutex_unlock(&llcrypt_init_mutex);
477 mutex_unlock(&llcrypt_init_mutex);
481 void llcrypt_msg(const struct inode *inode, int mask,
482 const char *fmt, ...)
484 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
485 DEFAULT_RATELIMIT_BURST);
486 struct va_format vaf;
489 if (!__ratelimit(&rs))
496 CDEBUG(mask, "llcrypt (%s, inode %lu): %pV\n",
497 inode->i_sb->s_id, inode->i_ino, &vaf);
499 CDEBUG(mask, "llcrypt: %pV\n", &vaf);
503 static inline int set_llcrypt_crypto_engine_type(void)
505 if (strcmp(client_encryption_engine, "system-default") == 0)
506 llcrypt_crypto_engine = LLCRYPT_ENGINE_SYSTEM_DEFAULT;
507 else if (strcmp(client_encryption_engine, "aes-ni") == 0)
508 llcrypt_crypto_engine = LLCRYPT_ENGINE_AES_NI;
510 llcrypt_crypto_engine = LLCRYPT_ENGINE_INVALID;
512 if (llcrypt_crypto_engine == LLCRYPT_ENGINE_INVALID)
519 * llcrypt_init() - Set up for fs encryption.
521 int __init llcrypt_init(void)
526 * Use an unbound workqueue to allow bios to be decrypted in parallel
527 * even when they happen to complete on the same CPU. This sacrifices
528 * locality, but it's worthwhile since decryption is CPU-intensive.
530 * Also use a high-priority workqueue to prioritize decryption work,
531 * which blocks reads from completing, over regular application tasks.
533 llcrypt_read_workqueue = alloc_workqueue("llcrypt_read_queue",
534 WQ_UNBOUND | WQ_HIGHPRI,
536 if (!llcrypt_read_workqueue)
539 llcrypt_ctx_cachep = KMEM_CACHE(llcrypt_ctx, SLAB_RECLAIM_ACCOUNT);
540 if (!llcrypt_ctx_cachep)
541 goto fail_free_queue;
543 llcrypt_info_cachep = KMEM_CACHE(llcrypt_info, SLAB_RECLAIM_ACCOUNT);
544 if (!llcrypt_info_cachep)
547 err = set_llcrypt_crypto_engine_type();
549 CERROR("libcfs: bad crypto engine provided via 'client_encryption_engine': rc = %d\n",
554 err = llcrypt_init_keyring();
561 kmem_cache_destroy(llcrypt_info_cachep);
563 kmem_cache_destroy(llcrypt_ctx_cachep);
565 destroy_workqueue(llcrypt_read_workqueue);
571 * llcrypt_exit() - Clean up for fs encryption.
573 void __exit llcrypt_exit(void)
575 llcrypt_exit_keyring();
579 * Make sure all delayed rcu free inodes are flushed before we
584 kmem_cache_destroy(llcrypt_info_cachep);
585 kmem_cache_destroy(llcrypt_ctx_cachep);
586 destroy_workqueue(llcrypt_read_workqueue);