OBD_FREE_PTR(kctx);
}
-static
-void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
+/*
+ * Should be used for buffers allocated with k/vmalloc().
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient. For simplicity, no
+ * attempt is made to reduce the number of sgs by coalescing
+ * physically contiguous pages.
+ *
+ * This function is copied from the ceph filesystem code.
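+ *
+ * A minimal usage sketch, following the pattern of the callers
+ * below (buf and buf_len stand for the caller's data):
+ *
+ *	struct sg_table sgt;
+ *	struct scatterlist prealloc_sg;
+ *	int rc;
+ *
+ *	rc = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
+ *	if (rc != 0)
+ *		return rc;
+ *	... pass sgt.sgl to the crypto API ...
+ *	teardown_sgtable(&sgt);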
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+ const void *buf, unsigned int buf_len)
+{
+ struct scatterlist *sg;
+ const bool is_vmalloc = is_vmalloc_addr(buf);
+ unsigned int off = offset_in_page(buf);
+ unsigned int chunk_cnt = 1;
+ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+ int i;
+ int ret;
+
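+ /* an empty buffer cannot be mapped; zero the table so that a
+ * teardown_sgtable() on the error path is a harmless no-op */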
+ if (buf_len == 0) {
+ memset(sgt, 0, sizeof(*sgt));
+ return -EINVAL;
+ }
+
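+ /* vmalloc memory is only virtually contiguous, so map it one
+ * page per sg entry; kmalloc memory is physically contiguous
+ * and a single chunk covering the whole buffer suffices */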
+ if (is_vmalloc) {
+ chunk_cnt = chunk_len >> PAGE_SHIFT;
+ chunk_len = PAGE_SIZE;
+ }
+
+ if (chunk_cnt > 1) {
+ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+ if (ret)
+ return ret;
+ } else {
+ WARN_ON(chunk_cnt != 1);
+ sg_init_table(prealloc_sg, 1);
+ sgt->sgl = prealloc_sg;
+ sgt->nents = sgt->orig_nents = 1;
+ }
+
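+ /* walk the buffer a chunk at a time; only the first chunk may
+ * start at a non-zero offset within its page */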
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+ struct page *page;
+ unsigned int len = min(chunk_len - off, buf_len);
+
+ if (is_vmalloc)
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ sg_set_page(sg, page, len, off);
+
+ off = 0;
+ buf += len;
+ buf_len -= len;
+ }
+
+ WARN_ON(buf_len != 0);
+
+ return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
{
- sg_init_table(sg, 1);
- sg_set_buf(sg, ptr, len);
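+ /* a single-entry table points at the caller's preallocated sg,
+ * so there is nothing to free */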
+ if (sgt->orig_nents > 1)
+ sg_free_table(sgt);
}
static
void * out,
int length)
{
+ struct sg_table sg_out;
struct blkcipher_desc desc;
struct scatterlist sg;
__u8 local_iv[16] = {0};
if (crypto_blkcipher_ivsize(tfm) > 16) {
CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
- goto out;
- }
+ goto out;
+ }
- if (iv)
+ if (iv)
memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
- memcpy(out, in, length);
- buf_to_sg(&sg, out, length);
+ memcpy(out, in, length);
+
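+ /* the data was already copied into @out, so one sg table over
+ * @out serves as both source and destination for an in-place
+ * cipher operation */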
+ ret = setup_sgtable(&sg_out, &sg, out, length);
+ if (ret != 0)
+ goto out;
if (decrypt)
- ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_decrypt_iv(&desc, sg_out.sgl,
+ sg_out.sgl, length);
else
- ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_encrypt_iv(&desc, sg_out.sgl,
+ sg_out.sgl, length);
+ teardown_sgtable(&sg_out);
out:
- return(ret);
+ return ret;
}
static inline
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct hash_desc desc;
- struct scatterlist sg[1];
- int i;
+ struct hash_desc desc;
+ struct sg_table sgt;
+ struct scatterlist sg[1];
+ int i, rc;
crypto_hash_setkey(tfm, key->data, key->len);
desc.tfm = tfm;
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
- buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- crypto_hash_update(&desc, sg, msgs[i].len);
+
+ rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+ if (rc != 0)
+ return rc;
+
+ crypto_hash_update(&desc, sgt.sgl, msgs[i].len);
+
+ teardown_sgtable(&sgt);
}
for (i = 0; i < iovcnt; i++) {
}
if (khdr) {
- buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- crypto_hash_update(&desc, sg, sizeof(*khdr));
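+ /* the krb5 header is hashed last, after the message payload */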
+ rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
+ if (rc != 0)
+ return rc;
+
+ crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));
+
+ teardown_sgtable(&sgt);
}
return crypto_hash_final(&desc, cksum->data);
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct hash_desc desc;
- struct scatterlist sg[1];
- int i;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ struct sg_table sgt;
+ int i, rc;
LASSERT(kb->kb_tfm);
desc.tfm = tfm;
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
- buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- crypto_hash_update(&desc, sg, msgs[i].len);
+
+ rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+ if (rc != 0)
+ return rc;
+
+ crypto_hash_update(&desc, sgt.sgl, msgs[i].len);
+
+ teardown_sgtable(&sgt);
}
for (i = 0; i < iovcnt; i++) {
}
if (khdr) {
- buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- crypto_hash_update(&desc, sg, sizeof(*khdr));
- }
+ rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
+ if (rc != 0)
+ return rc;
+
+ crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));
+
+ teardown_sgtable(&sgt);
+ }
crypto_hash_final(&desc, cksum->data);
{
struct blkcipher_desc desc;
struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
__u8 local_iv[16] = {0}, *buf;
__u32 datalen = 0;
int i, rc;
desc.flags = 0;
for (i = 0; i < inobj_cnt; i++) {
- LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
-
- buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
- buf_to_sg(&dst, buf, outobj->len - datalen);
+ LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
+
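+ /* map the current input object and the space remaining in the
+ * output buffer; the cipher is run over inobjs[i].len bytes */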
+ rc = setup_sgtable(&sg_src, &src, inobjs[i].data,
+ inobjs[i].len);
+ if (rc != 0)
+ RETURN(rc);
+
+ rc = setup_sgtable(&sg_dst, &dst, buf,
+ outobj->len - datalen);
+ if (rc != 0) {
+ teardown_sgtable(&sg_src);
+ RETURN(rc);
+ }
if (mode_ecb) {
if (enc)
- rc = crypto_blkcipher_encrypt(
- &desc, &dst, &src, src.length);
+ rc = crypto_blkcipher_encrypt(&desc, sg_dst.sgl,
+ sg_src.sgl,
+ inobjs[i].len);
else
- rc = crypto_blkcipher_decrypt(
- &desc, &dst, &src, src.length);
+ rc = crypto_blkcipher_decrypt(&desc, sg_dst.sgl,
+ sg_src.sgl,
+ inobjs[i].len);
} else {
if (enc)
- rc = crypto_blkcipher_encrypt_iv(
- &desc, &dst, &src, src.length);
+ rc = crypto_blkcipher_encrypt_iv(&desc,
+ sg_dst.sgl,
+ sg_src.sgl,
+ inobjs[i].len);
else
- rc = crypto_blkcipher_decrypt_iv(
- &desc, &dst, &src, src.length);
+ rc = crypto_blkcipher_decrypt_iv(&desc,
+ sg_dst.sgl,
+ sg_src.sgl,
+ inobjs[i].len);
}
+ teardown_sgtable(&sg_src);
+ teardown_sgtable(&sg_dst);
+
if (rc) {
CERROR("encrypt error %d\n", rc);
RETURN(rc);
struct blkcipher_desc ciph_desc;
__u8 local_iv[16] = {0};
struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
int blocksize, i, rc, nob = 0;
LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
ciph_desc.flags = 0;
/* encrypt confounder */
- buf_to_sg(&src, confounder, blocksize);
- buf_to_sg(&dst, cipher->data, blocksize);
+ rc = setup_sgtable(&sg_src, &src, confounder, blocksize);
+ if (rc != 0)
+ return rc;
+
+ rc = setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
+ if (rc != 0) {
+ teardown_sgtable(&sg_src);
+ return rc;
+ }
+
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
+ sg_src.sgl, blocksize);
+
+ teardown_sgtable(&sg_dst);
+ teardown_sgtable(&sg_src);
+
if (rc) {
CERROR("error to encrypt confounder: %d\n", rc);
return rc;
}
/* encrypt krb5 header */
- buf_to_sg(&src, khdr, sizeof(*khdr));
- buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+ rc = setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
+ if (rc != 0)
+ return rc;
+
+ rc = setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
+ sizeof(*khdr));
+ if (rc != 0) {
+ teardown_sgtable(&sg_src);
+ return rc;
+ }
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
sizeof(*khdr));
+
+ teardown_sgtable(&sg_dst);
+ teardown_sgtable(&sg_src);
+
if (rc) {
CERROR("error to encrypt krb5 header: %d\n", rc);
return rc;
struct blkcipher_desc ciph_desc;
__u8 local_iv[16] = {0};
struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
int ct_nob = 0, pt_nob = 0;
int blocksize, i, rc;
}
/* decrypt head (confounder) */
- buf_to_sg(&src, cipher->data, blocksize);
- buf_to_sg(&dst, plain->data, blocksize);
+ rc = setup_sgtable(&sg_src, &src, cipher->data, blocksize);
+ if (rc != 0)
+ return rc;
+
+ rc = setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
+ if (rc != 0) {
+ teardown_sgtable(&sg_src);
+ return rc;
+ }
+
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
+ sg_src.sgl, blocksize);
+
+ teardown_sgtable(&sg_dst);
+ teardown_sgtable(&sg_src);
+
if (rc) {
CERROR("error to decrypt confounder: %d\n", rc);
return rc;
BD_GET_KIOV(desc, i++).kiov_len = 0;
/* decrypt tail (krb5 header) */
- buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
- buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+ rc = setup_sgtable(&sg_src, &src, cipher->data + blocksize,
+ sizeof(*khdr));
+ if (rc != 0)
+ return rc;
+
+ rc = setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
+ sizeof(*khdr));
+ if (rc != 0) {
+ teardown_sgtable(&sg_src);
+ return rc;
+ }
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
sizeof(*khdr));
+
+ teardown_sgtable(&sg_dst);
+ teardown_sgtable(&sg_src);
+
if (rc) {
CERROR("error to decrypt tail: %d\n", rc);
return rc;