LU-4629 gss: fix uninitialized variable
[fs/lustre-release.git] / lustre/ptlrpc/gss/gss_krb5_mech.c
index 7eb0c95..e79ffdd 100644
@@ -1,9 +1,9 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * Modifications for Lustre
  *
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
  *
  * Author: Eric Mei <ericm@clusterfs.com>
  */
  *
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
 #ifdef __KERNEL__
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crypto.h>
-#include <linux/random.h>
 #include <linux/mutex.h>
 #else
 #include <liblustre.h>
@@ -155,14 +151,14 @@ static const char * enctype2str(__u32 enctype)
 static
 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 {
-        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
-        if (kb->kb_tfm == NULL) {
-                CERROR("failed to alloc tfm: %s, mode %d\n",
-                       alg_name, alg_mode);
-                return -1;
-        }
-
-        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+       kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+       if (IS_ERR(kb->kb_tfm)) {
+               CERROR("failed to alloc tfm: %s, mode %d\n",
+                      alg_name, alg_mode);
+               return -1;
+       }
+
+       if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                 CERROR("failed to set %s key, len %d\n",
                        alg_name, kb->kb_key.len);
                 return -1;
@@ -205,7 +201,7 @@ void keyblock_free(struct krb5_keyblock *kb)
 {
         rawobj_free(&kb->kb_key);
         if (kb->kb_tfm)
-                ll_crypto_free_blkcipher(kb->kb_tfm);
+               crypto_free_blkcipher(kb->kb_tfm);
 }
 
 static
@@ -241,7 +237,7 @@ int get_rawobj(char **ptr, const char *end, rawobj_t *res)
         if (q > end || q < p)
                 return -1;
 
-        OBD_ALLOC(res->data, len);
+        OBD_ALLOC_LARGE(res->data, len);
         if (!res->data)
                 return -1;
 
@@ -257,12 +253,12 @@ int get_keyblock(char **ptr, const char *end,
 {
         char *buf;
 
-        OBD_ALLOC(buf, keysize);
+        OBD_ALLOC_LARGE(buf, keysize);
         if (buf == NULL)
                 return -1;
 
         if (get_bytes(ptr, end, buf, keysize)) {
-                OBD_FREE(buf, keysize);
+                OBD_FREE_LARGE(buf, keysize);
                 return -1;
         }
 
@@ -533,13 +529,12 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
 static
 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 {
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
+       sg_init_table(sg, 1);
+       sg_set_buf(sg, ptr, len);
 }
 
 static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                    int decrypt,
                    void * iv,
                    void * in,
@@ -556,36 +551,34 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
         desc.info = local_iv;
         desc.flags= 0;
 
-        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+       if (length % crypto_blkcipher_blocksize(tfm) != 0) {
                 CERROR("output length %d mismatch blocksize %d\n",
-                       length, ll_crypto_blkcipher_blocksize(tfm));
+                      length, crypto_blkcipher_blocksize(tfm));
                 goto out;
         }
 
-        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
-                CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+       if (crypto_blkcipher_ivsize(tfm) > 16) {
+               CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
                 goto out;
         }
 
         if (iv)
-                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+               memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
         memcpy(out, in, length);
         buf_to_sg(&sg, out, length);
 
         if (decrypt)
-                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+               ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
         else
-                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+               ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
 
 out:
         return(ret);
 }
 
-#ifdef HAVE_ASYNC_BLOCK_CIPHER
-
 static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
                      rawobj_t *key,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
@@ -596,80 +589,38 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
         struct scatterlist sg[1];
         int                i;
 
-        ll_crypto_hash_setkey(tfm, key->data, key->len);
+       crypto_hash_setkey(tfm, key->data, key->len);
         desc.tfm  = tfm;
         desc.flags= 0;
 
-        ll_crypto_hash_init(&desc);
+       crypto_hash_init(&desc);
 
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
+               crypto_hash_update(&desc, sg, msgs[i].len);
         }
 
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
-                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
-        }
-
-        if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
-        }
-
-        return ll_crypto_hash_final(&desc, cksum->data);
-}
-
-#else /* ! HAVE_ASYNC_BLOCK_CIPHER */
-
-static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
-                     rawobj_t *key,
-                     struct krb5_header *khdr,
-                     int msgcnt, rawobj_t *msgs,
-                     int iovcnt, lnet_kiov_t *iovs,
-                     rawobj_t *cksum)
-{
-        struct scatterlist sg[1];
-        __u32              keylen = key->len, i;
-
-        crypto_hmac_init(tfm, key->data, &keylen);
 
-        for (i = 0; i < msgcnt; i++) {
-                if (msgs[i].len == 0)
-                        continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                crypto_hmac_update(tfm, sg, 1);
-        }
-
-        for (i = 0; i < iovcnt; i++) {
-                if (iovs[i].kiov_len == 0)
-                        continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
-                crypto_hmac_update(tfm, sg, 1);
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
+               crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                crypto_hmac_update(tfm, sg, 1);
+               crypto_hash_update(&desc, sg, sizeof(*khdr));
         }
 
-        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
-        return 0;
+       return crypto_hash_final(&desc, cksum->data);
 }
 
-#endif /* HAVE_ASYNC_BLOCK_CIPHER */
-
 static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
                      struct krb5_keyblock *kb,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
@@ -684,30 +635,30 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
         desc.tfm  = tfm;
         desc.flags= 0;
 
-        ll_crypto_hash_init(&desc);
+       crypto_hash_init(&desc);
 
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
+               crypto_hash_update(&desc, sg, msgs[i].len);
         }
 
         for (i = 0; i < iovcnt; i++) {
                 if (iovs[i].kiov_len == 0)
                         continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
-                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
+               crypto_hash_update(&desc, sg, iovs[i].kiov_len);
         }
 
         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+               crypto_hash_update(&desc, sg, sizeof(*khdr));
         }
 
-        ll_crypto_hash_final(&desc, cksum->data);
+       crypto_hash_final(&desc, cksum->data);
 
         return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                             cksum->data, cksum->len);
@@ -726,17 +677,17 @@ __s32 krb5_make_checksum(__u32 enctype,
                          rawobj_t *cksum)
 {
         struct krb5_enctype   *ke = &enctypes[enctype];
-        struct ll_crypto_hash *tfm;
+       struct crypto_hash    *tfm;
         __u32                  code = GSS_S_FAILURE;
         int                    rc;
 
-        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+       if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                 return GSS_S_FAILURE;
         }
 
-        cksum->len = ll_crypto_hash_digestsize(tfm);
-        OBD_ALLOC(cksum->data, cksum->len);
+       cksum->len = crypto_hash_digestsize(tfm);
+        OBD_ALLOC_LARGE(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
                 goto out_tfm;
@@ -752,7 +703,7 @@ __s32 krb5_make_checksum(__u32 enctype,
         if (rc == 0)
                 code = GSS_S_COMPLETE;
 out_tfm:
-        ll_crypto_free_hash(tfm);
+       crypto_free_hash(tfm);
         return code;
 }
 
@@ -777,9 +728,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx,
         }
 
         khdr->kh_filler = 0xff;
-        spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        spin_unlock(&krb5_seq_lock);
+       spin_lock(&krb5_seq_lock);
+       khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+       spin_unlock(&krb5_seq_lock);
 }
 
 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
@@ -930,7 +881,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 }
 
 static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
                          int mode_ecb,
                          int inobj_cnt,
                          rawobj_t *inobjs,
@@ -957,17 +908,17 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
 
                 if (mode_ecb) {
                         if (enc)
-                                rc = ll_crypto_blkcipher_encrypt(
+                               rc = crypto_blkcipher_encrypt(
                                         &desc, &dst, &src, src.length);
                         else
-                                rc = ll_crypto_blkcipher_decrypt(
+                               rc = crypto_blkcipher_decrypt(
                                         &desc, &dst, &src, src.length);
                 } else {
                         if (enc)
-                                rc = ll_crypto_blkcipher_encrypt_iv(
+                               rc = crypto_blkcipher_encrypt_iv(
                                         &desc, &dst, &src, src.length);
                         else
-                                rc = ll_crypto_blkcipher_decrypt_iv(
+                               rc = crypto_blkcipher_decrypt_iv(
                                         &desc, &dst, &src, src.length);
                 }
 
@@ -984,8 +935,11 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
         RETURN(0);
 }
 
+/*
+ * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
+ */
 static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                       struct krb5_header *khdr,
                       char *confounder,
                       struct ptlrpc_bulk_desc *desc,
@@ -1000,7 +954,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         LASSERT(desc->bd_iov_count);
         LASSERT(desc->bd_enc_iov);
 
-        blocksize = ll_crypto_blkcipher_blocksize(tfm);
+       blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
         LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -1012,7 +966,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         buf_to_sg(&src, confounder, blocksize);
         buf_to_sg(&dst, cipher->data, blocksize);
 
-        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to encrypt confounder: %d\n", rc);
                 return rc;
@@ -1020,22 +974,19 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                src.page = desc->bd_iov[i].kiov_page;
-                src.offset = desc->bd_iov[i].kiov_offset;
-                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
-                             (~(blocksize - 1));
-
-                if (adj_nob)
-                        nob += src.length;
-
-                dst.page = desc->bd_enc_iov[i].kiov_page;
-                dst.offset = src.offset;
-                dst.length = src.length;
+               sg_set_page(&src, desc->bd_iov[i].kiov_page,
+                           (desc->bd_iov[i].kiov_len + blocksize - 1) &
+                           (~(blocksize - 1)),
+                           desc->bd_iov[i].kiov_offset);
+               if (adj_nob)
+                       nob += src.length;
+               sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+                           src.offset);
 
                 desc->bd_enc_iov[i].kiov_offset = dst.offset;
                 desc->bd_enc_iov[i].kiov_len = dst.length;
 
-                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+               rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
                 if (rc) {
                         CERROR("error to encrypt page: %d\n", rc);
@@ -1047,8 +998,8 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         buf_to_sg(&src, khdr, sizeof(*khdr));
         buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
 
-        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
-                                            &dst, &src, sizeof(*khdr));
+       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+                                        sizeof(*khdr));
         if (rc) {
                 CERROR("error to encrypt krb5 header: %d\n", rc);
                 return rc;
@@ -1063,13 +1014,26 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 /*
  * desc->bd_nob_transferred is the size of cipher text received.
  * desc->bd_nob is the target size of plain text supposed to be.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size.
+ * - for client read: we don't know data size for each page, so
+ *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ *   be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len.
+ *   this means we DO NOT support the situation that server send an odd size
+ *   data in a page which is not the last one.
+ * - for server write: we knows exactly data size for each page being expected,
+ *   thus kiov_len is accurate already, so we should not adjust it at all.
+ *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
+ *   should have been done by prep_bulk().
  */
 static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                       struct krb5_header *khdr,
                       struct ptlrpc_bulk_desc *desc,
                       rawobj_t *cipher,
-                      rawobj_t *plain)
+                      rawobj_t *plain,
+                      int adj_nob)
 {
         struct blkcipher_desc   ciph_desc;
         __u8                    local_iv[16] = {0};
@@ -1081,7 +1045,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
         LASSERT(desc->bd_enc_iov);
         LASSERT(desc->bd_nob_transferred);
 
-        blocksize = ll_crypto_blkcipher_blocksize(tfm);
+       blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
         LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -1098,54 +1062,60 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
         buf_to_sg(&src, cipher->data, blocksize);
         buf_to_sg(&dst, plain->data, blocksize);
 
-        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to decrypt confounder: %d\n", rc);
                 return rc;
         }
 
-        /*
-         * decrypt clear pages. note the enc_iov is prepared by prep_bulk()
-         * which already done some sanity checkings.
-         *
-         * desc->bd_nob is the actual plain text size supposed to be
-         * transferred. desc->bd_nob_transferred is the actual cipher
-         * text received.
-         */
         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
              i++) {
-                if (desc->bd_enc_iov[i].kiov_len == 0)
-                        continue;
-
-                if (ct_nob + desc->bd_enc_iov[i].kiov_len >
-                    desc->bd_nob_transferred)
-                        desc->bd_enc_iov[i].kiov_len =
-                                desc->bd_nob_transferred - ct_nob;
+                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
+                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
+                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+                               i, desc->bd_enc_iov[i].kiov_offset,
+                               desc->bd_enc_iov[i].kiov_len, blocksize);
+                        return -EFAULT;
+                }
 
-                desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
-                if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
-                        desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
+                if (adj_nob) {
+                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
+                            desc->bd_nob_transferred)
+                                desc->bd_enc_iov[i].kiov_len =
+                                        desc->bd_nob_transferred - ct_nob;
 
-                src.page = desc->bd_enc_iov[i].kiov_page;
-                src.offset = desc->bd_enc_iov[i].kiov_offset;
-                src.length = desc->bd_enc_iov[i].kiov_len;
+                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
+                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
+                                desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
+                } else {
+                        /* this should be guaranteed by LNET */
+                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
+                                desc->bd_nob_transferred);
+                        LASSERT(desc->bd_iov[i].kiov_len <=
+                                desc->bd_enc_iov[i].kiov_len);
+                }
 
-                dst = src;
+                if (desc->bd_enc_iov[i].kiov_len == 0)
+                        continue;
 
-                if (desc->bd_iov[i].kiov_offset % blocksize == 0)
-                        dst.page = desc->bd_iov[i].kiov_page;
+               sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+                           desc->bd_enc_iov[i].kiov_len,
+                           desc->bd_enc_iov[i].kiov_offset);
+               dst = src;
+               if (desc->bd_iov[i].kiov_len % blocksize == 0)
+                       sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
-                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-                                                    src.length);
+               rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                                src.length);
                 if (rc) {
                         CERROR("error to decrypt page: %d\n", rc);
                         return rc;
                 }
 
-                if (desc->bd_iov[i].kiov_offset % blocksize) {
-                        memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
+                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
+                       memcpy(page_address(desc->bd_iov[i].kiov_page) +
                                desc->bd_iov[i].kiov_offset,
-                               cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
+                              page_address(desc->bd_enc_iov[i].kiov_page) +
                                desc->bd_iov[i].kiov_offset,
                                desc->bd_iov[i].kiov_len);
                 }
@@ -1154,12 +1124,29 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 pt_nob += desc->bd_iov[i].kiov_len;
         }
 
+        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+                CERROR("%d cipher text transferred but only %d decrypted\n",
+                       desc->bd_nob_transferred, ct_nob);
+                return -EFAULT;
+        }
+
+        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+                CERROR("%d plain text expected but only %d received\n",
+                       desc->bd_nob, pt_nob);
+                return -EFAULT;
+        }
+
+        /* if needed, clear up the rest unused iovs */
+        if (adj_nob)
+                while (i < desc->bd_iov_count)
+                        desc->bd_iov[i++].kiov_len = 0;
+
         /* decrypt tail (krb5 header) */
         buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
         buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
 
-        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
-                                            &dst, &src, sizeof(*khdr));
+       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                        sizeof(*khdr));
         if (rc) {
                 CERROR("error to decrypt tail: %d\n", rc);
                 return rc;
@@ -1193,7 +1180,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
         LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                 ke->ke_conf_size >=
-                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+               crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 
         /*
          * final token format:
@@ -1208,7 +1195,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        cfs_get_random_bytes(conf, ke->ke_conf_size);
 
         /* get encryption blocksize. note kc_keye might not associated with
          * a tfm, currently only for arcfour-hmac */
@@ -1217,7 +1204,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -1262,23 +1249,23 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         cipher.len = token->len - sizeof(*khdr);
         LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                 arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               rawobj_t                 arc4_keye;
+               struct crypto_blkcipher *arc4_tfm;
 
-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, rc = -EACCES);
-                }
+               if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                      NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                       CERROR("failed to obtain arc4 enc key\n");
+                       GOTO(arc4_out, rc = -EACCES);
+               }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, rc = -EACCES);
-                }
+               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               if (IS_ERR(arc4_tfm)) {
+                       CERROR("failed to alloc tfm arc4 in ECB mode\n");
+                       GOTO(arc4_out_key, rc = -EACCES);
+               }
 
-                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+               if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
@@ -1288,7 +1275,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                           3, data_desc, &cipher, 1);
 arc4_out_tfm:
-                ll_crypto_free_blkcipher(arc4_tfm);
+               crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
@@ -1326,7 +1313,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
         LASSERT(desc->bd_enc_iov);
         LASSERT(kctx->kc_keye.kb_tfm);
 
-        blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+       blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 
         for (i = 0; i < desc->bd_iov_count; i++) {
                 LASSERT(desc->bd_enc_iov[i].kiov_page);
@@ -1378,7 +1365,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        cfs_get_random_bytes(conf, ke->ke_conf_size);
 
         /* get encryption blocksize. note kc_keye might not associated with
          * a tfm, currently only for arcfour-hmac */
@@ -1387,7 +1374,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /*
@@ -1497,7 +1484,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /* expected token layout:
@@ -1524,7 +1511,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         }
 
         /* decrypting */
-        OBD_ALLOC(tmpbuf, bodysize);
+        OBD_ALLOC_LARGE(tmpbuf, bodysize);
         if (!tmpbuf)
                 return GSS_S_FAILURE;
 
@@ -1535,26 +1522,26 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         plain_out.data = tmpbuf;
         plain_out.len = bodysize;
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                 arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               rawobj_t                 arc4_keye;
+               struct crypto_blkcipher *arc4_tfm;
 
-                cksum.data = token->data + token->len - ke->ke_hash_size;
-                cksum.len = ke->ke_hash_size;
+               cksum.data = token->data + token->len - ke->ke_hash_size;
+               cksum.len = ke->ke_hash_size;
 
-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, rc = -EACCES);
-                }
+               if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                      NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                       CERROR("failed to obtain arc4 enc key\n");
+                       GOTO(arc4_out, rc = -EACCES);
+               }
 
-                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
-                if (arc4_tfm == NULL) {
-                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
-                        GOTO(arc4_out_key, rc = -EACCES);
-                }
+               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               if (IS_ERR(arc4_tfm)) {
+                       CERROR("failed to alloc tfm arc4 in ECB mode\n");
+                       GOTO(arc4_out_key, rc = -EACCES);
+               }
 
-                if (ll_crypto_blkcipher_setkey(arc4_tfm,
+               if (crypto_blkcipher_setkey(arc4_tfm,
                                          arc4_keye.data, arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
@@ -1564,7 +1551,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                           1, &cipher_in, &plain_out, 0);
 arc4_out_tfm:
-                ll_crypto_free_blkcipher(arc4_tfm);
+               crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
@@ -1621,7 +1608,7 @@ arc4_out:
 
         major = GSS_S_COMPLETE;
 out_free:
-        OBD_FREE(tmpbuf, bodysize);
+        OBD_FREE_LARGE(tmpbuf, bodysize);
         rawobj_free(&cksum);
         return major;
 }
@@ -1629,7 +1616,7 @@ out_free:
 static
 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                                struct ptlrpc_bulk_desc *desc,
-                               rawobj_t *token)
+                               rawobj_t *token, int adj_nob)
 {
         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
@@ -1663,7 +1650,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                 LBUG();
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
 
@@ -1685,7 +1672,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         plain.len = cipher.len;
 
         rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
-                               desc, &cipher, &plain);
+                               desc, &cipher, &plain, adj_nob);
         if (rc)
                 return GSS_S_DEFECTIVE_TOKEN;
 
@@ -1786,17 +1773,17 @@ static struct gss_api_mech gss_kerberos_mech = {
 
 int __init init_kerberos_module(void)
 {
-        int status;
+       int status;
 
-        spin_lock_init(&krb5_seq_lock);
+       spin_lock_init(&krb5_seq_lock);
 
-        status = lgss_mech_register(&gss_kerberos_mech);
-        if (status)
-                CERROR("Failed to register kerberos gss mechanism!\n");
-        return status;
+       status = lgss_mech_register(&gss_kerberos_mech);
+       if (status)
+               CERROR("Failed to register kerberos gss mechanism!\n");
+       return status;
 }
 
-void __exit cleanup_kerberos_module(void)
+void cleanup_kerberos_module(void)
 {
         lgss_mech_unregister(&gss_kerberos_mech);
 }
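
For reference, below is a minimal sketch (not part of this patch) of the synchronous blkcipher calling pattern that the converted code above uses directly: crypto_alloc_blkcipher() returns an ERR_PTR() on failure (hence the IS_ERR() checks added in keyblock_init() and the arc4 paths), the key is loaded with crypto_blkcipher_setkey(), and linear buffers are wrapped in single-entry scatterlists before crypto_blkcipher_encrypt_iv(), as buf_to_sg() does. The transform name "cbc(aes)" and the function name example_cbc_encrypt() are illustrative assumptions only, not identifiers from gss_krb5_mech.c.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Encrypt a linear buffer in place; len must be a multiple of the blocksize. */
static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
                               u8 *buf, unsigned int len)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc    desc;
        struct scatterlist       sg;
        u8                       iv[16] = { 0 };
        int                      rc;

        /* allocation may fail: check with IS_ERR(), as keyblock_init() now does */
        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_blkcipher_setkey(tfm, key, keylen);
        if (rc)
                goto out;

        /* mirror krb5_encrypt(): refuse lengths that are not block aligned */
        if (len % crypto_blkcipher_blocksize(tfm)) {
                rc = -EINVAL;
                goto out;
        }

        desc.tfm   = tfm;
        desc.info  = iv;        /* IV consumed by the _iv() variant */
        desc.flags = 0;

        /* buf_to_sg() equivalent: one scatterlist entry over the buffer */
        sg_init_one(&sg, buf, len);

        rc = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
out:
        crypto_free_blkcipher(tfm);
        return rc;
}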