+/*
+ * Encrypt the source pages of bulk descriptor @desc into the
+ * descriptor's encryption kiovs, using block cipher @tfm with
+ * initialization vector @iv.
+ *
+ * Each source fragment is first rounded up to the cipher block size
+ * (sk_block_mask), so a ciphertext fragment may be longer than its
+ * plaintext; the padded offset/length are recorded back into the
+ * matching encryption kiov for the peer.  When @adj_nob is set,
+ * desc->bd_nob is replaced with the padded total.
+ *
+ * NOTE(review): @cipher is unused in this path - presumably kept for
+ * signature parity with a sibling wrap/unwrap helper; confirm against
+ * callers.
+ *
+ * Returns 0 on success or the (negative) error from
+ * crypto_blkcipher_encrypt_iv() on failure.  NOTE(review): return type
+ * is __u32 while sk_decrypt_bulk() returns GSS_S_* codes - verify
+ * callers treat the two consistently.
+ */
+static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
+ struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
+ int adj_nob)
+{
+ struct blkcipher_desc cdesc = {
+ .tfm = tfm,
+ .info = iv,
+ .flags = 0,
+ };
+ struct scatterlist ptxt;
+ struct scatterlist ctxt;
+ int blocksize;
+ int i;
+ int rc;
+ int nob = 0;
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+
+ sg_init_table(&ptxt, 1);
+ sg_init_table(&ctxt, 1);
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ /* plaintext length padded up to a whole cipher block */
+ sg_set_page(&ptxt, BD_GET_KIOV(desc, i).kiov_page,
+ sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
+ blocksize),
+ BD_GET_KIOV(desc, i).kiov_offset);
+ nob += ptxt.length;
+
+ sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
+ ptxt.length, ptxt.offset);
+
+ /* publish the (padded) ciphertext geometry in the descriptor */
+ BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
+ BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
+
+ rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
+ ptxt.length);
+ if (rc) {
+ CERROR("failed to encrypt page: %d\n", rc);
+ return rc;
+ }
+ }
+
+ if (adj_nob)
+ desc->bd_nob = nob;
+
+ return 0;
+}
+
+/*
+ * Decrypt the encryption kiovs of bulk descriptor @desc into its plain
+ * kiovs, using block cipher @tfm with initialization vector @iv.
+ *
+ * The total transferred ciphertext (desc->bd_nob_transferred) must be a
+ * multiple of the cipher block size, and every cipher fragment must be
+ * block aligned in both offset and length.
+ *
+ * When @adj_nob is set, fragment lengths are clipped against
+ * bd_nob_transferred / bd_nob (the true sizes are only known after
+ * decryption) and any unused trailing plain kiovs get length 0.
+ * Otherwise the caller-supplied lengths are validated as-is.
+ *
+ * NOTE(review): @cipher is unused in this path as well - confirm
+ * against callers.
+ *
+ * Returns 0 on success, GSS_S_DEFECTIVE_TOKEN for malformed input, or
+ * GSS_S_FAILURE on decryption or length-accounting errors.
+ */
+static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
+ struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
+ int adj_nob)
+{
+ struct blkcipher_desc cdesc = {
+ .tfm = tfm,
+ .info = iv,
+ .flags = 0,
+ };
+ struct scatterlist ptxt;
+ struct scatterlist ctxt;
+ int blocksize;
+ int i;
+ int rc;
+ int pnob = 0;
+ int cnob = 0;
+
+ sg_init_table(&ptxt, 1);
+ sg_init_table(&ctxt, 1);
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ if (desc->bd_nob_transferred % blocksize != 0) {
+ CERROR("Transfer not a multiple of block size: %d\n",
+ desc->bd_nob_transferred);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
+ i++) {
+ lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
+ lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
+
+ if (ciov->kiov_offset % blocksize != 0 ||
+ ciov->kiov_len % blocksize != 0) {
+ CERROR("Invalid bulk descriptor vector\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ /* Must adjust bytes here because we know the actual sizes after
+ * decryption. Similar to what gss_cli_ctx_unwrap_bulk does for
+ * integrity only mode */
+ if (adj_nob) {
+ /* cipher text must not exceed transferred size */
+ if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
+ ciov->kiov_len =
+ desc->bd_nob_transferred - cnob;
+
+ piov->kiov_len = ciov->kiov_len;
+
+ /* plain text must not exceed bulk's size */
+ if (ciov->kiov_len + pnob > desc->bd_nob)
+ piov->kiov_len = desc->bd_nob - pnob;
+ } else {
+ /* Taken from krb5_decrypt since it was not verified
+ * whether or not LNET guarantees these */
+ if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
+ piov->kiov_len > ciov->kiov_len) {
+ CERROR("Invalid decrypted length\n");
+ return GSS_S_FAILURE;
+ }
+ }
+
+ /* this fragment carries no data; nothing to decrypt */
+ if (ciov->kiov_len == 0)
+ continue;
+
+ sg_init_table(&ctxt, 1);
+ sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
+ ciov->kiov_offset);
+ /* default: decrypt in place on the cipher page */
+ ptxt = ctxt;
+
+ /* In the event the plain text size is not a multiple
+ * of blocksize we decrypt in place and copy the result
+ * after the decryption */
+ if (piov->kiov_len % blocksize == 0)
+ sg_assign_page(&ptxt, piov->kiov_page);
+
+ rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
+ ctxt.length);
+ if (rc) {
+ CERROR("Decryption failed for page: %d\n", rc);
+ return GSS_S_FAILURE;
+ }
+
+ /* partial final block: copy only the real plaintext bytes
+ * out of the cipher page into the plain page */
+ if (piov->kiov_len % blocksize != 0) {
+ memcpy(page_address(piov->kiov_page) +
+ piov->kiov_offset,
+ page_address(ciov->kiov_page) +
+ ciov->kiov_offset,
+ piov->kiov_len);
+ }
+
+ cnob += ciov->kiov_len;
+ pnob += piov->kiov_len;
+ }
+
+ /* if needed, clear up the rest unused iovs */
+ if (adj_nob)
+ while (i < desc->bd_iov_count)
+ BD_GET_KIOV(desc, i++).kiov_len = 0;
+
+ /* every transferred cipher byte must have been consumed */
+ if (unlikely(cnob != desc->bd_nob_transferred)) {
+ CERROR("%d cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, cnob);
+ return GSS_S_FAILURE;
+ }
+
+ if (unlikely(!adj_nob && pnob != desc->bd_nob)) {
+ CERROR("%d plain text expected but only %d received\n",
+ desc->bd_nob, pnob);
+ return GSS_S_FAILURE;
+ }
+
+ return 0;
+}
+