+/*
+ * If adj_nob != 0, adjust desc->bd_nob to the actual cipher text size.
+ */
+static
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
+ struct krb5_header *khdr,
+ char *confounder,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int blocksize, i, rc, nob = 0;
+
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+ LASSERT(desc->bd_iov_count);
+ LASSERT(GET_ENC_KIOV(desc));
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
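+ /*
+ * local_iv starts as all zeros and is updated in place by each
+ * crypto_blkcipher_encrypt_iv() call below, so the confounder, the
+ * bulk pages and the krb5 header are encrypted as one continuous
+ * CBC stream.
+ */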
+ /* encrypt confounder */
+ buf_to_sg(&src, confounder, blocksize);
+ buf_to_sg(&dst, cipher->data, blocksize);
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("failed to encrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ /* encrypt clear pages */
+ for (i = 0; i < desc->bd_iov_count; i++) {
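+ /*
+ * round the source length up to a whole number of cipher blocks;
+ * any bytes past kiov_len in the last block are encrypted along
+ * with the data as padding
+ */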
+ sg_init_table(&src, 1);
+ sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
+ (BD_GET_KIOV(desc, i).kiov_len + blocksize - 1) &
+ ~(blocksize - 1),
+ BD_GET_KIOV(desc, i).kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_init_table(&dst, 1);
+ sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+ src.length, src.offset);
+
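+ /* record the block-aligned offset/length where the cipher text of
+ * this page actually lives */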
+ BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
+ BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("failed to encrypt page %d: %d\n", i, rc);
+ return rc;
+ }
+ }
+
+ /* encrypt krb5 header */
+ buf_to_sg(&src, khdr, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ sizeof(*khdr));
+ if (rc) {
+ CERROR("failed to encrypt krb5 header: %d\n", rc);
+ return rc;
+ }
+
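+ /* report the block-rounded total as the bulk size */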
+ if (adj_nob)
+ desc->bd_nob = nob;
+
+ return 0;
+}
+
+/*
+ * desc->bd_nob_transferred is the size of the cipher text received.
+ * desc->bd_nob is the expected size of the plain text.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size:
+ * - for a client read: we don't know the data size of each page, so
+ * bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
+ * might be smaller, so we adjust it according to
+ * bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ * this means we DO NOT support a server sending an odd amount of
+ * data in any page other than the last one.
+ * - for a server write: we know the exact data size expected for each
+ * page, so kiov_len is already accurate and must not be adjusted.
+ * bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
+ * round_up(bd_iov[]->kiov_len), which prep_bulk() should already
+ * have done.
+ */
+static
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
+ struct krb5_header *khdr,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ rawobj_t *plain,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int ct_nob = 0, pt_nob = 0;
+ int blocksize, i, rc;
+
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+ LASSERT(desc->bd_iov_count);
+ LASSERT(GET_ENC_KIOV(desc));
+ LASSERT(desc->bd_nob_transferred);
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ if (desc->bd_nob_transferred % blocksize) {
+ CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+ return -EPROTO;
+ }
+
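+ /*
+ * as on the encrypt side, local_iv is carried across the
+ * crypto_blkcipher_decrypt_iv() calls below, so the whole message
+ * is decrypted as one continuous CBC stream
+ */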
+ /* decrypt head (confounder) */
+ buf_to_sg(&src, cipher->data, blocksize);
+ buf_to_sg(&dst, plain->data, blocksize);
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("failed to decrypt confounder: %d\n", rc);
+ return rc;
+ }
+
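+ /* walk the pages until every transferred cipher byte is consumed */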
+ for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+ i++) {
+ if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize != 0 ||
+ BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize != 0) {
+ CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+ i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
+ BD_GET_ENC_KIOV(desc, i).kiov_len,
+ blocksize);
+ return -EFAULT;
+ }
+
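+ /*
+ * adj_nob: clamp this page's cipher length to what was actually
+ * transferred, and its plain length to the expected bd_nob
+ */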
+ if (adj_nob) {
+ if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+ desc->bd_nob_transferred)
+ BD_GET_ENC_KIOV(desc, i).kiov_len =
+ desc->bd_nob_transferred - ct_nob;
+
+ BD_GET_KIOV(desc, i).kiov_len =
+ BD_GET_ENC_KIOV(desc, i).kiov_len;
+ if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+ desc->bd_nob)
+ BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_nob - pt_nob;
+ } else {
+ /* this should be guaranteed by LNET */
+ LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len <=
+ desc->bd_nob_transferred);
+ LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
+ BD_GET_ENC_KIOV(desc, i).kiov_len);
+ }
+
+ if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+ continue;
+
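+ /*
+ * if the plain length is block aligned, decrypt straight into the
+ * plain page; otherwise decrypt in place in the cipher page and
+ * copy the partial tail out below
+ */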
+ sg_init_table(&src, 1);
+ sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
+ BD_GET_ENC_KIOV(desc, i).kiov_len,
+ BD_GET_ENC_KIOV(desc, i).kiov_offset);
+ dst = src;
+ if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+ sg_assign_page(&dst,
+ BD_GET_KIOV(desc, i).kiov_page);
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("failed to decrypt page %d: %d\n", i, rc);
+ return rc;
+ }
+
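+ /* partial last block: copy only the valid plain bytes out of the
+ * in-place decrypted cipher page */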
+ if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
+ memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
+ BD_GET_KIOV(desc, i).kiov_offset,
+ page_address(BD_GET_ENC_KIOV(desc, i).kiov_page) +
+ BD_GET_KIOV(desc, i).kiov_offset,
+ BD_GET_KIOV(desc, i).kiov_len);
+ }
+
+ ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
+ pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+ }
+
+ if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+ CERROR("%d bytes of cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, ct_nob);
+ return -EFAULT;
+ }
+
+ if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+ CERROR("%d bytes of plain text expected but only %d received\n",
+ desc->bd_nob, pt_nob);
+ return -EFAULT;
+ }
+
+ /* if needed, zero the length of any remaining unused iovs */
+ if (adj_nob)
+ while (i < desc->bd_iov_count)
+ BD_GET_KIOV(desc, i++).kiov_len = 0;
+
+ /* decrypt tail (krb5 header), in place within the cipher buffer */
+ buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ sizeof(*khdr));
+ if (rc) {
+ CERROR("failed to decrypt tail (krb5 header): %d\n", rc);
+ return rc;
+ }
+
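+ /* the decrypted tail must match the expected krb5 header */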
+ if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+ CERROR("krb5 header doesn't match\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+