- * Should be used for buffers allocated with k/vmalloc().
- *
- * Dispose of @sgt with teardown_sgtable().
- *
- * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
- * in cases where a single sg is sufficient. For simplicity, no attempt
- * is made to reduce the number of sgs by squeezing physically
- * contiguous pages together.
- *
- * This function is copied from the ceph filesystem code.
- */
-static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
- const void *buf, unsigned int buf_len)
-{
- struct scatterlist *sg;
- const bool is_vmalloc = is_vmalloc_addr(buf);
- unsigned int off = offset_in_page(buf);
- unsigned int chunk_cnt = 1;
- unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
- int i;
- int ret;
-
- if (buf_len == 0) {
- memset(sgt, 0, sizeof(*sgt));
- return -EINVAL;
- }
-
- if (is_vmalloc) {
- chunk_cnt = chunk_len >> PAGE_SHIFT;
- chunk_len = PAGE_SIZE;
- }
-
- if (chunk_cnt > 1) {
- ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
- if (ret)
- return ret;
- } else {
- WARN_ON(chunk_cnt != 1);
- sg_init_table(prealloc_sg, 1);
- sgt->sgl = prealloc_sg;
- sgt->nents = sgt->orig_nents = 1;
- }
-
- for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
- struct page *page;
- unsigned int len = min(chunk_len - off, buf_len);
-
- if (is_vmalloc)
- page = vmalloc_to_page(buf);
- else
- page = virt_to_page(buf);
-
- sg_set_page(sg, page, len, off);
-
- off = 0;
- buf += len;
- buf_len -= len;
- }
-
- WARN_ON(buf_len != 0);
-
- return 0;
-}
-
-static void teardown_sgtable(struct sg_table *sgt)
-{
- if (sgt->orig_nents > 1)
- sg_free_table(sgt);
-}
-
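A minimal usage sketch of the setup_sgtable()/teardown_sgtable() pair documented above, not taken from this file: wrap one k/vmalloc'ed buffer in a scatterlist table, hand it to whatever consumer needs an sg list, then dispose of the table. The helper name sg_wrap_example() and its parameters are hypothetical.

static int sg_wrap_example(const void *buf, unsigned int len)
{
	struct sg_table sgt;
	struct scatterlist prealloc_sg;	/* avoids allocation when one sg suffices */
	int rc;

	rc = setup_sgtable(&sgt, &prealloc_sg, buf, len);
	if (rc)
		return rc;

	/* ... pass sgt.sgl and sgt.orig_nents to a crypto or DMA primitive ... */

	teardown_sgtable(&sgt);	/* frees the table only if sg_alloc_table() was used */
	return 0;
}
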
-static
-__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
-		   int decrypt,
-		   void *iv,
-		   void *in,
-		   void *out,
-		   int length)
-{
- struct sg_table sg_out;
- struct blkcipher_desc desc;
- struct scatterlist sg;
- __u8 local_iv[16] = {0};
- __u32 ret = -EINVAL;
-
- LASSERT(tfm);
- desc.tfm = tfm;
- desc.info = local_iv;
-	desc.flags = 0;
-
- if (length % crypto_blkcipher_blocksize(tfm) != 0) {
-		CERROR("output length %d is not a multiple of blocksize %d\n",
-		       length, crypto_blkcipher_blocksize(tfm));
- goto out;
- }
-
- if (crypto_blkcipher_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
- goto out;
- }
-
- if (iv)
- memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
-
- memcpy(out, in, length);
-
- ret = setup_sgtable(&sg_out, &sg, out, length);
- if (ret != 0)
- goto out;
-
- if (decrypt)
- ret = crypto_blkcipher_decrypt_iv(&desc, sg_out.sgl,
- sg_out.sgl, length);
- else
- ret = crypto_blkcipher_encrypt_iv(&desc, sg_out.sgl,
- sg_out.sgl, length);
-
- teardown_sgtable(&sg_out);
-out:
- return ret;
-}
-
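For context, a hedged sketch of how a caller might obtain and key a legacy crypto_blkcipher handle before invoking krb5_encrypt() above; the "cbc(aes)" algorithm name and the krb5_encrypt_example() helper are illustrative assumptions, and length must be a multiple of the cipher block size, as krb5_encrypt() checks.

static int krb5_encrypt_example(const u8 *key, unsigned int keylen,
				void *buf, int length)
{
	struct crypto_blkcipher *tfm;
	__u8 iv[16] = { 0 };
	int rc;

	/* assumed algorithm name; CRYPTO_ALG_ASYNC as mask excludes async implementations */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_blkcipher_setkey(tfm, key, keylen);
	if (rc == 0)
		/* in == out is fine: krb5_encrypt() copies and then transforms in place */
		rc = krb5_encrypt(tfm, 0, iv, buf, buf, length);

	crypto_free_blkcipher(tfm);
	return rc;
}
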
-static inline
-int krb5_digest_hmac(struct crypto_hash *tfm,
- rawobj_t *key,
- struct krb5_header *khdr,
- int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
- rawobj_t *cksum)
-{
- struct hash_desc desc;
- struct sg_table sgt;
- struct scatterlist sg[1];
- int i, rc;
-
- crypto_hash_setkey(tfm, key->data, key->len);
- desc.tfm = tfm;
-	desc.flags = 0;
-
- crypto_hash_init(&desc);
-
- for (i = 0; i < msgcnt; i++) {
- if (msgs[i].len == 0)
- continue;
-
- rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
- if (rc != 0)
- return rc;
-
- crypto_hash_update(&desc, sgt.sgl, msgs[i].len);
-
- teardown_sgtable(&sgt);
- }
-
- for (i = 0; i < iovcnt; i++) {
- if (iovs[i].kiov_len == 0)
- continue;
-
- sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
- iovs[i].kiov_offset);
- crypto_hash_update(&desc, sg, iovs[i].kiov_len);
- }
-
- if (khdr) {
- rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
- if (rc != 0)
- return rc;
-
- crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));
-
- teardown_sgtable(&sgt);
- }
-
- return crypto_hash_final(&desc, cksum->data);
-}
-
-static inline
-int krb5_digest_norm(struct crypto_hash *tfm,
- struct krb5_keyblock *kb,
- struct krb5_header *khdr,
- int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
- rawobj_t *cksum)
-{
- struct hash_desc desc;
- struct scatterlist sg[1];
- struct sg_table sgt;
- int i, rc;
-
- LASSERT(kb->kb_tfm);
- desc.tfm = tfm;
-	desc.flags = 0;
-
- crypto_hash_init(&desc);
-
- for (i = 0; i < msgcnt; i++) {
- if (msgs[i].len == 0)
- continue;
-
- rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
- if (rc != 0)
- return rc;
-
- crypto_hash_update(&desc, sgt.sgl, msgs[i].len);
-
- teardown_sgtable(&sgt);
- }
-
- for (i = 0; i < iovcnt; i++) {
- if (iovs[i].kiov_len == 0)
- continue;
-
- sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
- iovs[i].kiov_offset);
- crypto_hash_update(&desc, sg, iovs[i].kiov_len);
- }
-
- if (khdr) {
- rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
- if (rc != 0)
- return rc;
-
- crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));
-
- teardown_sgtable(&sgt);
- }
-
- crypto_hash_final(&desc, cksum->data);
-
- return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
- cksum->data, cksum->len);
-}
-
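A similar hedged caller sketch for the keyed digest path above: compute an HMAC over one contiguous message with no kiov pages and no krb5 header. The "hmac(sha1)" algorithm name and hmac_example() are assumptions for illustration; cksum->data is assumed to be preallocated with at least the digest size.

static int hmac_example(rawobj_t *key, rawobj_t *msg, rawobj_t *cksum)
{
	struct crypto_hash *tfm;
	int rc;

	tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	cksum->len = crypto_hash_digestsize(tfm);
	/* khdr == NULL and iovcnt == 0 are both handled by krb5_digest_hmac() */
	rc = krb5_digest_hmac(tfm, key, NULL, 1, msg, 0, NULL, cksum);

	crypto_free_hash(tfm);
	return rc;
}
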
-/*