+ if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
+ /* Replace each clear-text pagecache page with an encrypted
+  * bounce page before the write RPC goes out. count/off are
+  * forced to full-page granularity below; the clear-text deltas
+  * are stashed in bp_count_diff/bp_off_diff so they can be
+  * restored later (presumably on RPC completion — confirm in
+  * the completion path, which is outside this hunk).
+  */
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ struct page *data_page = NULL;
+ bool retried = false;
+ bool lockedbymyself;
+
+retry_encrypt:
+ /* The page can already be locked when we arrive here.
+ * This is possible when cl_page_assume/vvp_page_assume
+ * is stuck on wait_on_page_writeback with page lock
+ * held. In this case there is no risk for the lock to
+ * be released while we are doing our encryption
+ * processing, because writeback against that page will
+ * end in vvp_page_completion_write/cl_page_completion,
+ * which means only once the page is fully processed.
+ */
+ lockedbymyself = trylock_page(pg->pg);
+ /* Encrypt the whole page into a freshly allocated bounce
+  * page; GFP_NOFS because we may be on the writeback path.
+  */
+ data_page =
+ llcrypt_encrypt_pagecache_blocks(pg->pg,
+ PAGE_SIZE, 0,
+ GFP_NOFS);
+ /* Only drop the lock if we were the ones who took it. */
+ if (lockedbymyself)
+ unlock_page(pg->pg);
+ if (IS_ERR(data_page)) {
+ rc = PTR_ERR(data_page);
+ /* Retry exactly once on allocation failure; clear rc
+  * first so a successful retry does not leave a stale
+  * error code behind.
+  */
+ if (rc == -ENOMEM && !retried) {
+ retried = true;
+ rc = 0;
+ goto retry_encrypt;
+ }
+ /* NOTE(review): bounce pages already installed into
+  * pga[0..i-1] are not unwound here — presumably the
+  * caller releases them via the bp_*_diff bookkeeping;
+  * confirm against the error path of the caller.
+  */
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ /* len is forced to PAGE_SIZE, and poff to 0
+ * so store the old, clear text info
+ */
+ pg->pg = data_page;
+ pg->bp_count_diff = PAGE_SIZE - pg->count;
+ pg->count = PAGE_SIZE;
+ pg->bp_off_diff = pg->off & ~PAGE_MASK;
+ pg->off = pg->off & PAGE_MASK;
+ }
+ }
+