LU-6020 gss: properly map buffers to sg
author    Andrew Perepechko <andrew.perepechko@seagate.com>
          Sat, 21 Nov 2015 13:55:24 +0000 (16:55 +0300)
committer Oleg Drokin <oleg.drokin@intel.com>
          Sun, 13 Dec 2015 20:57:33 +0000 (20:57 +0000)
Many of the buffer pointers passed to buf_to_sg() come from vmalloc(),
e.g. via OBD_ALLOC_LARGE() in ptlrpc_add_rqs_to_pool().  sg_set_buf()
uses virt_to_page() to map a virtual address to a struct page, which
does not work for vmalloc addresses.
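
(Illustrative sketch, not part of the patch: virt_to_page() does plain
linear-map arithmetic, so for a vmalloc address it yields a bogus
struct page pointer; vmalloc memory has to be resolved page by page
with vmalloc_to_page().  The helper name sg_set_any_buf is
hypothetical.)

    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    static void sg_set_any_buf(struct scatterlist *sg, void *buf,
                               unsigned int len)
    {
            struct page *page;

            if (is_vmalloc_addr(buf))
                    page = vmalloc_to_page(buf); /* walks the page tables */
            else
                    page = virt_to_page(buf);    /* linear-map arithmetic */

            /* NB: only valid while buf + len stays within one page; a
             * vmalloc buffer may span physically discontiguous pages,
             * which is why setup_sgtable() below emits one sg entry
             * per page. */
            sg_set_page(sg, page, len, offset_in_page(buf));
    }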

The original code for buf_to_sg() caused the following crash:

BUG: unable to handle kernel paging request at ffffeb040057c040
IP: [<ffffffff81300367>] scatterwalk_pagedone+0x27/0x70
PGD 0
Oops: 0000 [#1] SMP
CPU 1
Pid: 2374, comm: ptlrpcd_3 Tainted: G           O 3.6.10-030610-generic
RIP: 0010:[<ffffffff81300367>]  [<ffffffff81300367>] scatterwalk_pagedone+0x27/0x70
RSP: 0018:ffff8801a3c178a8  EFLAGS: 00010282
RAX: ffffeb040057c040 RBX: ffff8801a3c17938 RCX: ffffeb040057c040
RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8801a3c17970
RBP: ffff8801a3c178a8 R08: 00000000000005a8 R09: ffff8801a3c17a40
R10: ffff8801a30370d0 R11: 0000000000000a68 R12: 0000000000000010
R13: ffff8801a3c17a08 R14: ffff8801a3c17970 R15: ffff88017d1c2c80
FS:  0000000000000000(0000) GS:ffff8801afa40000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: ffffeb040057c040 CR3: 0000000001c0c000 CR4: 00000000001407e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process ptlrpcd_3 (pid: 2374, threadinfo ffff8801a3c16000, task ffff8801a44e0000)
Stack:
 ffff8801a3c178b8 ffffffff813004bd ffff8801a3c17908 ffffffff8130303f
 ffff880100000000 ffffffff00000000 ffff8801a3c17908 ffff8801a3c17b18
 ffffc90015f015a8 0000000000000000 0000000000000010 0000000000000010
Call Trace:
 [<ffffffff813004bd>] scatterwalk_done+0x3d/0x50
 [<ffffffff8130303f>] blkcipher_walk_done+0x8f/0x230
 [<ffffffff8130a39f>] crypto_cbc_encrypt+0xff/0x190
 [<ffffffffa0688660>] ? aes_decrypt+0x80/0x80 [aesni_intel]
 [<ffffffffa0a1a1e4>] krb5_encrypt_bulk+0x164/0x5b0 [ptlrpc_gss]
 [<ffffffffa0a1a812>] gss_wrap_bulk_kerberos+0x1e2/0x490 [ptlrpc_gss]
 [<ffffffffa0a1600e>] lgss_wrap_bulk+0x2e/0x100 [ptlrpc_gss]
 [<ffffffffa0a0d98e>] gss_cli_ctx_wrap_bulk+0x44e/0x650 [ptlrpc_gss]
 [<ffffffffa0ab867c>] sptlrpc_cli_wrap_bulk+0x3c/0x70 [ptlrpc]
 [<ffffffffa0aba2d0>] sptlrpc_cli_wrap_request+0x60/0x360 [ptlrpc]
 [<ffffffffa0a8cde4>] ptl_send_rpc+0x164/0xc30 [ptlrpc]
 [<ffffffffa07be957>] ? libcfs_debug_msg+0x47/0x50 [libcfs]
 [<ffffffffa0a80ee0>] ptlrpc_send_new_req+0x3b0/0x940 [ptlrpc]
 [<ffffffffa0a86530>] ptlrpc_check_set+0x8e0/0x1d50 [ptlrpc]
 [<ffffffff816ac9f6>] ? schedule_timeout+0x146/0x260
 [<ffffffffa0ab0c9b>] ptlrpcd_check+0x4eb/0x5d0 [ptlrpc]
 [<ffffffffa0ab105f>] ptlrpcd+0x2df/0x420 [ptlrpc]
 [<ffffffff8108efa0>] ? try_to_wake_up+0x200/0x200
 [<ffffffffa0ab0d80>] ? ptlrpcd_check+0x5d0/0x5d0 [ptlrpc]
 [<ffffffff8107c5f3>] kthread+0x93/0xa0
 [<ffffffff816b8d04>] kernel_thread_helper+0x4/0x10
 [<ffffffff8107c560>] ? flush_kthread_worker+0xb0/0xb0
 [<ffffffff816b8d00>] ? gs_change+0x13/0x13

Change-Id: I346d50568b65ed10da2762ca34562fc2858a05d8
Signed-off-by: Andrew Perepechko <andrew.perepechko@seagate.com>
Xyratex-bug-id: SNT-15
Reviewed-on: http://review.whamcloud.com/17319
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/ptlrpc/gss/gss_krb5_mech.c

index 09895ba..289ab08 100644
@@ -522,11 +522,75 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
         OBD_FREE_PTR(kctx);
 }
 
-static
-void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
+/*
+ * Should be used for buffers allocated with k/vmalloc().
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient.  No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ *
+ * This function is copied from the ceph filesystem code.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+                        const void *buf, unsigned int buf_len)
+{
+       struct scatterlist *sg;
+       const bool is_vmalloc = is_vmalloc_addr(buf);
+       unsigned int off = offset_in_page(buf);
+       unsigned int chunk_cnt = 1;
+       unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+       int i;
+       int ret;
+
+       if (buf_len == 0) {
+               memset(sgt, 0, sizeof(*sgt));
+               return -EINVAL;
+       }
+
+       if (is_vmalloc) {
+               chunk_cnt = chunk_len >> PAGE_SHIFT;
+               chunk_len = PAGE_SIZE;
+       }
+
+       if (chunk_cnt > 1) {
+               ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+               if (ret)
+                       return ret;
+       } else {
+               WARN_ON(chunk_cnt != 1);
+               sg_init_table(prealloc_sg, 1);
+               sgt->sgl = prealloc_sg;
+               sgt->nents = sgt->orig_nents = 1;
+       }
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+               struct page *page;
+               unsigned int len = min(chunk_len - off, buf_len);
+
+               if (is_vmalloc)
+                       page = vmalloc_to_page(buf);
+               else
+                       page = virt_to_page(buf);
+
+               sg_set_page(sg, page, len, off);
+
+               off = 0;
+               buf += len;
+               buf_len -= len;
+       }
+
+       WARN_ON(buf_len != 0);
+
+       return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
 {
-       sg_init_table(sg, 1);
-       sg_set_buf(sg, ptr, len);
+       if (sgt->orig_nents > 1)
+               sg_free_table(sgt);
 }
 
 static
@@ -537,6 +601,7 @@ __u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                    void * out,
                    int length)
 {
+       struct sg_table sg_out;
         struct blkcipher_desc desc;
         struct scatterlist    sg;
         __u8 local_iv[16] = {0};
@@ -555,22 +620,28 @@ __u32 krb5_encrypt(struct crypto_blkcipher *tfm,
 
        if (crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
-                goto out;
-        }
+               goto out;
+       }
 
-        if (iv)
+       if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
-        memcpy(out, in, length);
-        buf_to_sg(&sg, out, length);
+       memcpy(out, in, length);
+
+       ret = setup_sgtable(&sg_out, &sg, out, length);
+       if (ret != 0)
+               goto out;
 
         if (decrypt)
-               ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+               ret = crypto_blkcipher_decrypt_iv(&desc, sg_out.sgl,
+                                                 sg_out.sgl, length);
         else
-               ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+               ret = crypto_blkcipher_encrypt_iv(&desc, sg_out.sgl,
+                                                 sg_out.sgl, length);
 
+       teardown_sgtable(&sg_out);
 out:
-        return(ret);
+       return ret;
 }
 
 static inline
@@ -581,9 +652,10 @@ int krb5_digest_hmac(struct crypto_hash *tfm,
                      int iovcnt, lnet_kiov_t *iovs,
                      rawobj_t *cksum)
 {
-        struct hash_desc   desc;
-        struct scatterlist sg[1];
-        int                i;
+       struct hash_desc        desc;
+       struct sg_table         sgt;
+       struct scatterlist      sg[1];
+       int                     i, rc;
 
        crypto_hash_setkey(tfm, key->data, key->len);
         desc.tfm  = tfm;
@@ -594,8 +666,14 @@ int krb5_digest_hmac(struct crypto_hash *tfm,
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-               crypto_hash_update(&desc, sg, msgs[i].len);
+
+               rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+               if (rc != 0)
+                       return rc;
+
+               crypto_hash_update(&desc, sgt.sgl, msgs[i].len);
+
+               teardown_sgtable(&sgt);
         }
 
         for (i = 0; i < iovcnt; i++) {
@@ -609,8 +687,13 @@ int krb5_digest_hmac(struct crypto_hash *tfm,
         }
 
         if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-               crypto_hash_update(&desc, sg, sizeof(*khdr));
+               rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
+               if (rc != 0)
+                       return rc;
+
+               crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));
+
+               teardown_sgtable(&sgt);
         }
 
        return crypto_hash_final(&desc, cksum->data);
@@ -624,9 +707,10 @@ int krb5_digest_norm(struct crypto_hash *tfm,
                      int iovcnt, lnet_kiov_t *iovs,
                      rawobj_t *cksum)
 {
-        struct hash_desc   desc;
-        struct scatterlist sg[1];
-        int                i;
+       struct hash_desc        desc;
+       struct scatterlist      sg[1];
+       struct sg_table         sgt;
+       int                     i, rc;
 
         LASSERT(kb->kb_tfm);
         desc.tfm  = tfm;
@@ -637,8 +721,14 @@ int krb5_digest_norm(struct crypto_hash *tfm,
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-               crypto_hash_update(&desc, sg, msgs[i].len);
+
+               rc = setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+               if (rc != 0)
+                       return rc;
+
+               crypto_hash_update(&desc, sgt.sgl, msgs[i].len);
+
+               teardown_sgtable(&sgt);
         }
 
         for (i = 0; i < iovcnt; i++) {
@@ -652,9 +742,14 @@ int krb5_digest_norm(struct crypto_hash *tfm,
         }
 
         if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-               crypto_hash_update(&desc, sg, sizeof(*khdr));
-        }
+               rc = setup_sgtable(&sgt, sg, (char *) khdr, sizeof(*khdr));
+               if (rc != 0)
+                       return rc;
+
+               crypto_hash_update(&desc, sgt.sgl, sizeof(*khdr));
+
+               teardown_sgtable(&sgt);
+       }
 
        crypto_hash_final(&desc, cksum->data);
 
@@ -888,6 +983,7 @@ int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
 {
         struct blkcipher_desc desc;
         struct scatterlist    src, dst;
+       struct sg_table         sg_src, sg_dst;
         __u8                  local_iv[16] = {0}, *buf;
         __u32                 datalen = 0;
         int                   i, rc;
@@ -899,27 +995,45 @@ int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
         desc.flags = 0;
 
         for (i = 0; i < inobj_cnt; i++) {
-                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
-
-                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
-                buf_to_sg(&dst, buf, outobj->len - datalen);
+               LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
+
+               rc = setup_sgtable(&sg_src, &src, inobjs[i].data,
+                                  inobjs[i].len);
+               if (rc != 0)
+                       RETURN(rc);
+
+               rc = setup_sgtable(&sg_dst, &dst, buf,
+                                  outobj->len - datalen);
+               if (rc != 0) {
+                       teardown_sgtable(&sg_src);
+                       RETURN(rc);
+               }
 
                 if (mode_ecb) {
                         if (enc)
-                               rc = crypto_blkcipher_encrypt(
-                                        &desc, &dst, &src, src.length);
+                               rc = crypto_blkcipher_encrypt(&desc, sg_dst.sgl,
+                                                             sg_src.sgl,
+                                                             inobjs[i].len);
                         else
-                               rc = crypto_blkcipher_decrypt(
-                                        &desc, &dst, &src, src.length);
+                               rc = crypto_blkcipher_decrypt(&desc, sg_dst.sgl,
+                                                             sg_src.sgl,
+                                                             inobjs[i].len);
                 } else {
                         if (enc)
-                               rc = crypto_blkcipher_encrypt_iv(
-                                        &desc, &dst, &src, src.length);
+                               rc = crypto_blkcipher_encrypt_iv(&desc,
+                                                                sg_dst.sgl,
+                                                                sg_src.sgl,
+                                                                inobjs[i].len);
                         else
-                               rc = crypto_blkcipher_decrypt_iv(
-                                        &desc, &dst, &src, src.length);
+                               rc = crypto_blkcipher_decrypt_iv(&desc,
+                                                                sg_dst.sgl,
+                                                                sg_src.sgl,
+                                                                inobjs[i].len);
                 }
 
+               teardown_sgtable(&sg_src);
+               teardown_sgtable(&sg_dst);
+
                 if (rc) {
                         CERROR("encrypt error %d\n", rc);
                         RETURN(rc);
@@ -947,6 +1061,7 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         struct blkcipher_desc   ciph_desc;
         __u8                    local_iv[16] = {0};
         struct scatterlist      src, dst;
+       struct sg_table         sg_src, sg_dst;
         int                     blocksize, i, rc, nob = 0;
 
        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
@@ -962,10 +1077,22 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         ciph_desc.flags = 0;
 
         /* encrypt confounder */
-        buf_to_sg(&src, confounder, blocksize);
-        buf_to_sg(&dst, cipher->data, blocksize);
+       rc = setup_sgtable(&sg_src, &src, confounder, blocksize);
+       if (rc != 0)
+               return rc;
+
+       rc = setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
+       if (rc != 0) {
+               teardown_sgtable(&sg_src);
+               return rc;
+       }
+
+       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
+                                        sg_src.sgl, blocksize);
+
+       teardown_sgtable(&sg_dst);
+       teardown_sgtable(&sg_src);
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to encrypt confounder: %d\n", rc);
                 return rc;
@@ -997,11 +1124,23 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         }
 
         /* encrypt krb5 header */
-        buf_to_sg(&src, khdr, sizeof(*khdr));
-        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+       rc = setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
+       if (rc != 0)
+               return rc;
+
+       rc = setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
+                          sizeof(*khdr));
+       if (rc != 0) {
+               teardown_sgtable(&sg_src);
+               return rc;
+       }
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));
+
+       teardown_sgtable(&sg_dst);
+       teardown_sgtable(&sg_src);
+
         if (rc) {
                 CERROR("error to encrypt krb5 header: %d\n", rc);
                 return rc;
@@ -1042,6 +1181,7 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
         struct blkcipher_desc   ciph_desc;
         __u8                    local_iv[16] = {0};
         struct scatterlist      src, dst;
+       struct sg_table         sg_src, sg_dst;
         int                     ct_nob = 0, pt_nob = 0;
         int                     blocksize, i, rc;
 
@@ -1064,10 +1204,22 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
         }
 
         /* decrypt head (confounder) */
-        buf_to_sg(&src, cipher->data, blocksize);
-        buf_to_sg(&dst, plain->data, blocksize);
+       rc = setup_sgtable(&sg_src, &src, cipher->data, blocksize);
+       if (rc != 0)
+               return rc;
+
+       rc = setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
+       if (rc != 0) {
+               teardown_sgtable(&sg_src);
+               return rc;
+       }
+
+       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
+                                        sg_src.sgl, blocksize);
+
+       teardown_sgtable(&sg_dst);
+       teardown_sgtable(&sg_src);
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to decrypt confounder: %d\n", rc);
                 return rc;
@@ -1157,11 +1309,24 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                        BD_GET_KIOV(desc, i++).kiov_len = 0;
 
         /* decrypt tail (krb5 header) */
-        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
-        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+       rc = setup_sgtable(&sg_src, &src, cipher->data + blocksize,
+                          sizeof(*khdr));
+       if (rc != 0)
+               return rc;
+
+       rc = setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
+                          sizeof(*khdr));
+       if (rc != 0) {
+               teardown_sgtable(&sg_src);
+               return rc;
+       }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
                                         sizeof(*khdr));
+
+       teardown_sgtable(&sg_src);
+       teardown_sgtable(&sg_dst);
+
         if (rc) {
                 CERROR("error to decrypt tail: %d\n", rc);
                 return rc;
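
For reference, the call-site pattern this patch introduces everywhere
buf_to_sg() was used (condensed sketch built from the patch's own
helpers; desc, buf and len stand in for the local variables at each
site):

    struct sg_table sgt;
    struct scatterlist sg;
    int rc;

    rc = setup_sgtable(&sgt, &sg, buf, len);  /* one sg entry per page
                                               * for vmalloc buffers */
    if (rc != 0)
            return rc;

    rc = crypto_blkcipher_encrypt_iv(&desc, sgt.sgl, sgt.sgl, len);

    teardown_sgtable(&sgt);  /* frees the table only when more than
                              * one entry was allocated */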