+ if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
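+ /* For a write to an encrypted file, replace each clear-text
+ * page with an encrypted bounce page before the pages are sent.
+ */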
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ struct page *data_page = NULL;
+ bool retried = false;
+ bool lockedbymyself;
+ u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
+ struct address_space *map_orig = NULL;
+ pgoff_t index_orig;
+
+retry_encrypt:
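+ /* Encryption works on LUSTRE_ENCRYPTION_UNIT_SIZE blocks, so
+ * round the transfer length up to a whole number of units.
+ */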
+ if (nunits & ~LUSTRE_ENCRYPTION_MASK)
+ nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
+ LUSTRE_ENCRYPTION_UNIT_SIZE;
+ /* The page can already be locked when we arrive here.
+ * This is possible when cl_page_assume/vvp_page_assume
+ * is stuck on wait_on_page_writeback with page lock
+ * held. In this case there is no risk of the lock being
+ * released while we are doing our encryption
+ * processing, because writeback against that page will
+ * end in vvp_page_completion_write/cl_page_completion,
+ * which only happens once the page is fully processed.
+ */
+ lockedbymyself = trylock_page(pg->pg);
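+ /* Direct I/O pages do not belong to the inode's page cache,
+ * so temporarily borrow its mapping and the matching file
+ * index: llcrypt derives the encryption IV from them.
+ */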
+ if (directio) {
+ map_orig = pg->pg->mapping;
+ pg->pg->mapping = inode->i_mapping;
+ index_orig = pg->pg->index;
+ pg->pg->index = pg->off >> PAGE_SHIFT;
+ }
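+ /* llcrypt_encrypt_pagecache_blocks() allocates a bounce page
+ * and encrypts into it, leaving the clear-text page untouched.
+ */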
+ data_page =
+ llcrypt_encrypt_pagecache_blocks(pg->pg,
+ nunits, 0,
+ GFP_NOFS);
+ if (directio) {
+ pg->pg->mapping = map_orig;
+ pg->pg->index = index_orig;
+ }
+ if (lockedbymyself)
+ unlock_page(pg->pg);
+ if (IS_ERR(data_page)) {
+ rc = PTR_ERR(data_page);
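+ /* A -ENOMEM means bounce page allocation failed; retry
+ * once in case memory is reclaimed in the meantime.
+ */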
+ if (rc == -ENOMEM && !retried) {
+ retried = true;
+ rc = 0;
+ goto retry_encrypt;
+ }
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ /* Set PageChecked flag on bounce page for
+ * disambiguation in osc_release_bounce_pages().
+ */
+ SetPageChecked(data_page);
+ pg->pg = data_page;
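+ /* The RPC now references the bounce page instead of the
+ * original; osc_release_bounce_pages() frees it once the
+ * request is done with it.
+ */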
+ /* There should be no gap in the middle of the page array,
+ * so the last page gives the clear-text end of the I/O.
+ */
+ if (i == page_count - 1) {
+ struct osc_async_page *oap = brw_page2oap(pg);
+
+ oa->o_size = oap->oap_count +
+ oap->oap_obj_off + oap->oap_page_off;
+ }
+ /* len is forced to nunits and relative offset to 0, so
+ * store the original clear-text values.
+ */
+ pg->bp_count_diff = nunits - pg->count;
+ pg->count = nunits;
+ pg->bp_off_diff = pg->off & ~PAGE_MASK;
+ pg->off = pg->off & PAGE_MASK;
+ }
+ } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
+
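+ /* As in the write path above, reads must cover whole
+ * encryption units so that enough ciphertext is fetched
+ * to decrypt.
+ */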
+ if (nunits & ~LUSTRE_ENCRYPTION_MASK)
+ nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
+ LUSTRE_ENCRYPTION_UNIT_SIZE;
+ /* count/off are forced to cover the whole encryption unit
+ * size so that all encrypted data is stored on the OST;
+ * record the clear-text values in bp_{count,off}_diff.
+ */
+ pg->bp_count_diff = nunits - pg->count;
+ pg->count = nunits;
+ pg->bp_off_diff = pg->off & ~PAGE_MASK;
+ pg->off = pg->off & PAGE_MASK;
+ }
+ }
+