+ if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode) &&
+ llcrypt_has_encryption_key(inode)) {
+ struct page **pa = NULL;
+
+#ifdef CONFIG_LL_ENCRYPTION
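+ /* pre-allocate the bounce pages from the encryption
+ * page pool; they are handed to
+ * osc_encrypt_pagecache_blocks() below via pa[i]
+ */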
+ OBD_ALLOC_PTR_ARRAY_LARGE(pa, page_count);
+ if (pa == NULL) {
+ ptlrpc_request_free(req);
+ RETURN(-ENOMEM);
+ }
+
+ rc = obd_pool_get_pages_array(pa, page_count);
+ if (rc) {
+ CDEBUG(D_SEC, "failed to allocate from enc pool: %d\n",
+ rc);
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+#endif
+
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *brwpg = pga[i];
+ struct page *data_page = NULL;
+ bool retried = false;
+ bool lockedbymyself;
+ u32 nunits =
+ (brwpg->bp_off & ~PAGE_MASK) + brwpg->bp_count;
+ struct address_space *map_orig = NULL;
+ pgoff_t index_orig;
+
+retry_encrypt:
+ nunits = round_up(nunits, LUSTRE_ENCRYPTION_UNIT_SIZE);
+ /* The page can already be locked when we arrive here.
+ * This is possible when cl_page_assume/vvp_page_assume
+ * is stuck on wait_on_page_writeback with page lock
+ * held. In this case there is no risk for the lock to
+ * be released while we are doing our encryption
+ * processing, because writeback against that page will
+ * end in vvp_page_completion_write/cl_page_completion,
+ * i.e. only once the page has been fully processed.
+ */
+ lockedbymyself = trylock_page(brwpg->bp_page);
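+ /* For direct i/o the page does not belong to the
+ * inode's page cache, so temporarily borrow the inode
+ * mapping and the cl_page index, so that the
+ * encryption code can derive the block number from
+ * page->mapping/page->index. Both are restored right
+ * after the call.
+ */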
+ if (directio) {
+ map_orig = brwpg->bp_page->mapping;
+ brwpg->bp_page->mapping = inode->i_mapping;
+ index_orig = brwpg->bp_page->index;
+ clpage = oap2cl_page(brw_page2oap(brwpg));
+ brwpg->bp_page->index = clpage->cp_page_index;
+ }
+ data_page =
+ osc_encrypt_pagecache_blocks(brwpg->bp_page,
+ pa ? pa[i] : NULL,
+ nunits, 0,
+ GFP_NOFS);
+ if (directio) {
+ brwpg->bp_page->mapping = map_orig;
+ brwpg->bp_page->index = index_orig;
+ }
+ if (lockedbymyself)
+ unlock_page(brwpg->bp_page);
+ if (IS_ERR(data_page)) {
+ rc = PTR_ERR(data_page);
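+ /* retry once on -ENOMEM before failing the
+ * whole request
+ */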
+ if (rc == -ENOMEM && !retried) {
+ retried = true;
+ rc = 0;
+ goto retry_encrypt;
+ }
+ if (pa) {
+ obd_pool_put_pages_array(pa + i,
+ page_count - i);
+ OBD_FREE_PTR_ARRAY_LARGE(pa,
+ page_count);
+ }
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ /* Set PageChecked flag on bounce page for
+ * disambiguation in osc_release_bounce_pages().
+ */
+ SetPageChecked(data_page);
+ brwpg->bp_page = data_page;
+ /* there is no gap in the middle of the page array,
+ * so the last page marks the end of the i/o */
+ if (i == page_count - 1) {
+ struct osc_async_page *oap =
+ brw_page2oap(brwpg);
+
+ oa->o_size = oap->oap_count +
+ oap->oap_obj_off + oap->oap_page_off;
+ }
+ /* len is forced to nunits and the relative offset to
+ * 0, so store the original clear-text values
+ */
+ brwpg->bp_count_diff = nunits - brwpg->bp_count;
+ brwpg->bp_count = nunits;
+ brwpg->bp_off_diff = brwpg->bp_off & ~PAGE_MASK;
+ brwpg->bp_off = brwpg->bp_off & PAGE_MASK;
+ }
+
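+ /* the pool pages, if any, have been handed over as
+ * bounce pages, so only the pointer array itself
+ * needs to be freed here
+ */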
+ if (pa)
+ OBD_FREE_PTR_ARRAY_LARGE(pa, page_count);
+ } else if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
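+ /* encrypted inode without the encryption key loaded:
+ * take the object size from the cl_object attributes
+ * for oa->o_size instead of deriving it from the page
+ * extents
+ */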
+ struct osc_async_page *oap = brw_page2oap(pga[0]);
+ struct cl_page *clpage = oap2cl_page(oap);
+ struct cl_object *clobj = clpage->cp_obj;
+ struct cl_attr attr = { 0 };
+ struct lu_env *env;
+ __u16 refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env)) {
+ rc = PTR_ERR(env);
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ cl_object_attr_lock(clobj);
+ rc = cl_object_attr_get(env, clobj, &attr);
+ cl_object_attr_unlock(clobj);
+ cl_env_put(env, &refcheck);
+ if (rc != 0) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ if (attr.cat_size)
+ oa->o_size = attr.cat_size;
+ } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode) &&
+ llcrypt_has_encryption_key(inode)) {
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ u32 nunits = (pg->bp_off & ~PAGE_MASK) + pg->bp_count;
+
+ nunits = round_up(nunits, LUSTRE_ENCRYPTION_UNIT_SIZE);
+ /* count/off are forced to cover whole encryption
+ * units, because that is how encrypted data is stored
+ * on the OST, and the clear-text adjustments are kept
+ * in bp_{count,off}_diff.
+ */
+ pg->bp_count_diff = nunits - pg->bp_count;
+ pg->bp_count = nunits;
+ pg->bp_off_diff = pg->bp_off & ~PAGE_MASK;
+ pg->bp_off = pg->bp_off & PAGE_MASK;
+ }
+ }
+