LU-6210 gss: Change positional struct initializers to C99
[fs/lustre-release.git] lustre/ptlrpc/gss/gss_krb5_mech.c
index b11b267..000d7a8 100644
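
The change converts the positional struct initializers in this file to C99
designated initializers, so each value is bound to a named field of struct
krb5_enctype rather than to its position in the field list. A minimal sketch
of the pattern, using the field names that appear in the hunks below (members
left at zero, such as ke_enc_mode, or ke_hash_hmac for des-cbc-md5, can simply
be omitted):

	/* positional (old): values must match the declaration order exactly */
	[ENCTYPE_DES_CBC_RAW] = {
		"des-cbc-md5", "cbc(des)", "md5", 0, 16, 8, 0,
	},

	/* designated (new): each value names its field, the rest default to 0 */
	[ENCTYPE_DES_CBC_RAW] = {
		.ke_dispname	= "des-cbc-md5",
		.ke_enc_name	= "cbc(des)",
		.ke_hash_name	= "md5",
		.ke_hash_size	= 16,
		.ke_conf_size	= 8,
	},
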
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  *
  * Author: Eric Mei <ericm@clusterfs.com>
  */
 
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/mutex.h>
-#else
-#include <liblustre.h>
-#endif
 
 #include <obd.h>
 #include <obd_class.h>
@@ -72,8 +68,9 @@
 #include "gss_api.h"
 #include "gss_asn1.h"
 #include "gss_krb5.h"
+#include "gss_crypto.h"
 
-static cfs_spinlock_t krb5_seq_lock;
+static spinlock_t krb5_seq_lock;
 
 struct krb5_enctype {
         char           *ke_dispname;
@@ -91,51 +88,45 @@ struct krb5_enctype {
  * yet. this needs to be fixed in the future.
  */
 static struct krb5_enctype enctypes[] = {
-        [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
-                "des-cbc-md5",
-                "cbc(des)",
-                "md5",
-                0,
-                16,
-                8,
-                0,
-        },
-        [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
-                "des3-hmac-sha1",
-                "cbc(des3_ede)",
-                "hmac(sha1)",
-                0,
-                20,
-                8,
-                1,
-        },
-        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
-                "aes128-cts-hmac-sha1-96",
-                "cbc(aes)",
-                "hmac(sha1)",
-                0,
-                12,
-                16,
-                1,
-        },
-        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
-                "aes256-cts-hmac-sha1-96",
-                "cbc(aes)",
-                "hmac(sha1)",
-                0,
-                12,
-                16,
-                1,
-        },
-        [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
-                "arcfour-hmac-md5",
-                "ecb(arc4)",
-                "hmac(md5)",
-                0,
-                16,
-                8,
-                1,
-        },
+       [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
+               .ke_dispname    = "des-cbc-md5",
+               .ke_enc_name    = "cbc(des)",
+               .ke_hash_name   = "md5",
+               .ke_hash_size   = 16,
+               .ke_conf_size   = 8,
+       },
+       [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
+               .ke_dispname    = "des3-hmac-sha1",
+               .ke_enc_name    = "cbc(des3_ede)",
+               .ke_hash_name   = "hmac(sha1)",
+               .ke_hash_size   = 20,
+               .ke_conf_size   = 8,
+               .ke_hash_hmac   = 1,
+       },
+       [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
+               .ke_dispname    = "aes128-cts-hmac-sha1-96",
+               .ke_enc_name    = "cbc(aes)",
+               .ke_hash_name   = "hmac(sha1)",
+               .ke_hash_size   = 12,
+               .ke_conf_size   = 16,
+               .ke_hash_hmac   = 1,
+       },
+       [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
+               .ke_dispname    = "aes256-cts-hmac-sha1-96",
+               .ke_enc_name    = "cbc(aes)",
+               .ke_hash_name   = "hmac(sha1)",
+               .ke_hash_size   = 12,
+               .ke_conf_size   = 16,
+               .ke_hash_hmac   = 1,
+       },
+       [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
+               .ke_dispname    = "arcfour-hmac-md5",
+               .ke_enc_name    = "ecb(arc4)",
+               .ke_hash_name   = "hmac(md5)",
+               .ke_hash_size   = 16,
+               .ke_conf_size   = 8,
+               .ke_hash_hmac   = 1,
+       }
 };
 
 #define MAX_ENCTYPES    sizeof(enctypes)/sizeof(struct krb5_enctype)
@@ -149,25 +140,6 @@ static const char * enctype2str(__u32 enctype)
 }
 
 static
-int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
-{
-       kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
-       if (IS_ERR(kb->kb_tfm)) {
-               CERROR("failed to alloc tfm: %s, mode %d\n",
-                      alg_name, alg_mode);
-               return -1;
-       }
-
-        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
-                CERROR("failed to set %s key, len %d\n",
-                       alg_name, kb->kb_key.len);
-                return -1;
-        }
-
-        return 0;
-}
-
-static
 int krb5_init_keys(struct krb5_ctx *kctx)
 {
         struct krb5_enctype *ke;
@@ -180,175 +152,104 @@ int krb5_init_keys(struct krb5_ctx *kctx)
 
         ke = &enctypes[kctx->kc_enctype];
 
-        /* tfm arc4 is stateful, user should alloc-use-free by his own */
-        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
-            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
-                return -1;
-
-        /* tfm hmac is stateful, user should alloc-use-free by his own */
-        if (ke->ke_hash_hmac == 0 &&
-            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
-                return -1;
-        if (ke->ke_hash_hmac == 0 &&
-            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
-                return -1;
-
-        return 0;
-}
-
-static
-void keyblock_free(struct krb5_keyblock *kb)
-{
-        rawobj_free(&kb->kb_key);
-        if (kb->kb_tfm)
-                ll_crypto_free_blkcipher(kb->kb_tfm);
-}
-
-static
-int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
-{
-        return rawobj_dup(&new->kb_key, &kb->kb_key);
-}
-
-static
-int get_bytes(char **ptr, const char *end, void *res, int len)
-{
-        char *p, *q;
-        p = *ptr;
-        q = p + len;
-        if (q > end || q < p)
-                return -1;
-        memcpy(res, p, len);
-        *ptr = q;
-        return 0;
-}
-
-static
-int get_rawobj(char **ptr, const char *end, rawobj_t *res)
-{
-        char   *p, *q;
-        __u32   len;
-
-        p = *ptr;
-        if (get_bytes(&p, end, &len, sizeof(len)))
-                return -1;
-
-        q = p + len;
-        if (q > end || q < p)
-                return -1;
-
-        OBD_ALLOC_LARGE(res->data, len);
-        if (!res->data)
-                return -1;
-
-        res->len = len;
-        memcpy(res->data, p, len);
-        *ptr = q;
-        return 0;
-}
-
-static
-int get_keyblock(char **ptr, const char *end,
-                 struct krb5_keyblock *kb, __u32 keysize)
-{
-        char *buf;
-
-        OBD_ALLOC_LARGE(buf, keysize);
-        if (buf == NULL)
-                return -1;
+       /* tfm arc4 is stateful, user should alloc-use-free by his own */
+       if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
+           gss_keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
+               return -1;
 
-        if (get_bytes(ptr, end, buf, keysize)) {
-                OBD_FREE_LARGE(buf, keysize);
-                return -1;
-        }
+       /* tfm hmac is stateful, user should alloc-use-free by his own */
+       if (ke->ke_hash_hmac == 0 &&
+           gss_keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
+               return -1;
+       if (ke->ke_hash_hmac == 0 &&
+           gss_keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
+               return -1;
 
-        kb->kb_key.len = keysize;
-        kb->kb_key.data = buf;
         return 0;
 }
 
 static
 void delete_context_kerberos(struct krb5_ctx *kctx)
 {
-        rawobj_free(&kctx->kc_mech_used);
+       rawobj_free(&kctx->kc_mech_used);
 
-        keyblock_free(&kctx->kc_keye);
-        keyblock_free(&kctx->kc_keyi);
-        keyblock_free(&kctx->kc_keyc);
+       gss_keyblock_free(&kctx->kc_keye);
+       gss_keyblock_free(&kctx->kc_keyi);
+       gss_keyblock_free(&kctx->kc_keyc);
 }
 
 static
 __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
 {
-        unsigned int    tmp_uint, keysize;
+       unsigned int    tmp_uint, keysize;
 
-        /* seed_init flag */
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
-                goto out_err;
-        kctx->kc_seed_init = (tmp_uint != 0);
+       /* seed_init flag */
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+               goto out_err;
+       kctx->kc_seed_init = (tmp_uint != 0);
 
-        /* seed */
-        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
-                goto out_err;
+       /* seed */
+       if (gss_get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
+               goto out_err;
 
-        /* sign/seal algorithm, not really used now */
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
-            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
-                goto out_err;
+       /* sign/seal algorithm, not really used now */
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
+           gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+               goto out_err;
 
-        /* end time */
-        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
-                goto out_err;
+       /* end time */
+       if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
+               goto out_err;
 
-        /* seq send */
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
-                goto out_err;
-        kctx->kc_seq_send = tmp_uint;
+       /* seq send */
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+               goto out_err;
+       kctx->kc_seq_send = tmp_uint;
 
-        /* mech oid */
-        if (get_rawobj(&p, end, &kctx->kc_mech_used))
-                goto out_err;
+       /* mech oid */
+       if (gss_get_rawobj(&p, end, &kctx->kc_mech_used))
+               goto out_err;
 
-        /* old style enc/seq keys in format:
-         *   - enctype (u32)
-         *   - keysize (u32)
-         *   - keydata
-         * we decompose them to fit into the new context
-         */
+       /* old style enc/seq keys in format:
+        *   - enctype (u32)
+        *   - keysize (u32)
+        *   - keydata
+        * we decompose them to fit into the new context
+        */
 
-        /* enc key */
-        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
-                goto out_err;
+       /* enc key */
+       if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
+               goto out_err;
 
-        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
-                goto out_err;
+       if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
+               goto out_err;
 
-        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
-                goto out_err;
+       if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
+               goto out_err;
 
-        /* seq key */
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
-            tmp_uint != kctx->kc_enctype)
-                goto out_err;
+       /* seq key */
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
+           tmp_uint != kctx->kc_enctype)
+               goto out_err;
 
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
-            tmp_uint != keysize)
-                goto out_err;
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
+           tmp_uint != keysize)
+               goto out_err;
 
-        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
-                goto out_err;
+       if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
+               goto out_err;
 
-        /* old style fallback */
-        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
-                goto out_err;
+       /* old style fallback */
+       if (gss_keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
+               goto out_err;
 
-        if (p != end)
-                goto out_err;
+       if (p != end)
+               goto out_err;
 
-        CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
-        return 0;
+       CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
+       return 0;
 out_err:
-        return GSS_S_FAILURE;
+       return GSS_S_FAILURE;
 }
 
 /* Flags for version 2 context flags */
@@ -359,58 +260,59 @@ out_err:
 static
 __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
 {
-        unsigned int    tmp_uint, keysize;
-
-        /* end time */
-        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
-                goto out_err;
-
-        /* flags */
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
-                goto out_err;
-
-        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
-                kctx->kc_initiate = 1;
-        if (tmp_uint & KRB5_CTX_FLAG_CFX)
-                kctx->kc_cfx = 1;
-        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
-                kctx->kc_have_acceptor_subkey = 1;
-
-        /* seq send */
-        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
-                goto out_err;
-
-        /* enctype */
-        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
-                goto out_err;
-
-        /* size of each key */
-        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
-                goto out_err;
-
-        /* number of keys - should always be 3 */
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
-                goto out_err;
-
-        if (tmp_uint != 3) {
-                CERROR("Invalid number of keys: %u\n", tmp_uint);
-                goto out_err;
-        }
-
-        /* ke */
-        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
-                goto out_err;
-        /* ki */
-        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
-                goto out_err;
-        /* ki */
-        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
-                goto out_err;
+       unsigned int    tmp_uint, keysize;
+
+       /* end time */
+       if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
+               goto out_err;
+
+       /* flags */
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+               goto out_err;
+
+       if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
+               kctx->kc_initiate = 1;
+       if (tmp_uint & KRB5_CTX_FLAG_CFX)
+               kctx->kc_cfx = 1;
+       if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
+               kctx->kc_have_acceptor_subkey = 1;
+
+       /* seq send */
+       if (gss_get_bytes(&p, end, &kctx->kc_seq_send,
+           sizeof(kctx->kc_seq_send)))
+               goto out_err;
+
+       /* enctype */
+       if (gss_get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
+               goto out_err;
+
+       /* size of each key */
+       if (gss_get_bytes(&p, end, &keysize, sizeof(keysize)))
+               goto out_err;
+
+       /* number of keys - should always be 3 */
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+               goto out_err;
+
+       if (tmp_uint != 3) {
+               CERROR("Invalid number of keys: %u\n", tmp_uint);
+               goto out_err;
+       }
 
-        CDEBUG(D_SEC, "succesfully imported v2 context\n");
-        return 0;
+       /* ke */
+       if (gss_get_keyblock(&p, end, &kctx->kc_keye, keysize))
+               goto out_err;
+       /* ki */
+       if (gss_get_keyblock(&p, end, &kctx->kc_keyi, keysize))
+               goto out_err;
+       /* kc */
+       if (gss_get_keyblock(&p, end, &kctx->kc_keyc, keysize))
+               goto out_err;
+
+       CDEBUG(D_SEC, "successfully imported v2 context\n");
+       return 0;
 out_err:
-        return GSS_S_FAILURE;
+       return GSS_S_FAILURE;
 }
 
 /*
@@ -422,15 +324,15 @@ static
 __u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                       struct gss_ctx *gctx)
 {
-        struct krb5_ctx *kctx;
-        char            *p = (char *) inbuf->data;
-        char            *end = (char *) (inbuf->data + inbuf->len);
-        unsigned int     tmp_uint, rc;
-
-        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
-                CERROR("Fail to read version\n");
-                return GSS_S_FAILURE;
-        }
+       struct krb5_ctx *kctx;
+       char *p = (char *)inbuf->data;
+       char *end = (char *)(inbuf->data + inbuf->len);
+       unsigned int tmp_uint, rc;
+
+       if (gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
+               CERROR("Fail to read version\n");
+               return GSS_S_FAILURE;
+       }
 
         /* only support 0, 1 for the moment */
         if (tmp_uint > 2) {
@@ -488,17 +390,17 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
         if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                 goto out_err;
 
-        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
-                goto out_err;
-        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
-                goto out_err;
-        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
-                goto out_err;
+       if (gss_keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
+               goto out_err;
+       if (gss_keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
+               goto out_err;
+       if (gss_keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
+               goto out_err;
         if (krb5_init_keys(knew))
                 goto out_err;
 
         gctx_new->internal_ctx_id = knew;
-        CDEBUG(D_SEC, "succesfully copied reverse context\n");
+       CDEBUG(D_SEC, "successfully copied reverse context\n");
         return GSS_S_COMPLETE;
 
 out_err:
@@ -513,7 +415,7 @@ __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
 {
         struct krb5_ctx *kctx = gctx->internal_ctx_id;
 
-        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
+       *endtime = (unsigned long)((__u32) kctx->kc_endtime);
         return GSS_S_COMPLETE;
 }
 
@@ -526,229 +428,50 @@ void gss_delete_sec_context_kerberos(void *internal_ctx)
         OBD_FREE_PTR(kctx);
 }
 
-static
-void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
-{
-        sg->page = virt_to_page(ptr);
-        sg->offset = offset_in_page(ptr);
-        sg->length = len;
-}
-
-static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
-                   int decrypt,
-                   void * iv,
-                   void * in,
-                   void * out,
-                   int length)
-{
-        struct blkcipher_desc desc;
-        struct scatterlist    sg;
-        __u8 local_iv[16] = {0};
-        __u32 ret = -EINVAL;
-
-        LASSERT(tfm);
-        desc.tfm  = tfm;
-        desc.info = local_iv;
-        desc.flags= 0;
-
-        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
-                CERROR("output length %d mismatch blocksize %d\n",
-                       length, ll_crypto_blkcipher_blocksize(tfm));
-                goto out;
-        }
-
-        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
-                CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
-                goto out;
-        }
-
-        if (iv)
-                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
-
-        memcpy(out, in, length);
-        buf_to_sg(&sg, out, length);
-
-        if (decrypt)
-                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
-        else
-                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
-
-out:
-        return(ret);
-}
-
-#ifdef HAVE_ASYNC_BLOCK_CIPHER
-
-static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
-                     rawobj_t *key,
-                     struct krb5_header *khdr,
-                     int msgcnt, rawobj_t *msgs,
-                     int iovcnt, lnet_kiov_t *iovs,
-                     rawobj_t *cksum)
-{
-        struct hash_desc   desc;
-        struct scatterlist sg[1];
-        int                i;
-
-        ll_crypto_hash_setkey(tfm, key->data, key->len);
-        desc.tfm  = tfm;
-        desc.flags= 0;
-
-        ll_crypto_hash_init(&desc);
-
-        for (i = 0; i < msgcnt; i++) {
-                if (msgs[i].len == 0)
-                        continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
-        }
-
-        for (i = 0; i < iovcnt; i++) {
-                if (iovs[i].kiov_len == 0)
-                        continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
-                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
-        }
-
-        if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
-        }
-
-        return ll_crypto_hash_final(&desc, cksum->data);
-}
-
-#else /* ! HAVE_ASYNC_BLOCK_CIPHER */
-
-static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
-                     rawobj_t *key,
-                     struct krb5_header *khdr,
-                     int msgcnt, rawobj_t *msgs,
-                     int iovcnt, lnet_kiov_t *iovs,
-                     rawobj_t *cksum)
-{
-        struct scatterlist sg[1];
-        __u32              keylen = key->len, i;
-
-        crypto_hmac_init(tfm, key->data, &keylen);
-
-        for (i = 0; i < msgcnt; i++) {
-                if (msgs[i].len == 0)
-                        continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                crypto_hmac_update(tfm, sg, 1);
-        }
-
-        for (i = 0; i < iovcnt; i++) {
-                if (iovs[i].kiov_len == 0)
-                        continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
-                crypto_hmac_update(tfm, sg, 1);
-        }
-
-        if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                crypto_hmac_update(tfm, sg, 1);
-        }
-
-        crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
-        return 0;
-}
-
-#endif /* HAVE_ASYNC_BLOCK_CIPHER */
-
-static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
-                     struct krb5_keyblock *kb,
-                     struct krb5_header *khdr,
-                     int msgcnt, rawobj_t *msgs,
-                     int iovcnt, lnet_kiov_t *iovs,
-                     rawobj_t *cksum)
-{
-        struct hash_desc   desc;
-        struct scatterlist sg[1];
-        int                i;
-
-        LASSERT(kb->kb_tfm);
-        desc.tfm  = tfm;
-        desc.flags= 0;
-
-        ll_crypto_hash_init(&desc);
-
-        for (i = 0; i < msgcnt; i++) {
-                if (msgs[i].len == 0)
-                        continue;
-                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                ll_crypto_hash_update(&desc, sg, msgs[i].len);
-        }
-
-        for (i = 0; i < iovcnt; i++) {
-                if (iovs[i].kiov_len == 0)
-                        continue;
-                sg[0].page = iovs[i].kiov_page;
-                sg[0].offset = iovs[i].kiov_offset;
-                sg[0].length = iovs[i].kiov_len;
-                ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
-        }
-
-        if (khdr) {
-                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
-        }
-
-        ll_crypto_hash_final(&desc, cksum->data);
-
-        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
-                            cksum->data, cksum->len);
-}
-
 /*
  * compute (keyed/keyless) checksum against the plain text which appended
  * with krb5 wire token header.
  */
 static
 __s32 krb5_make_checksum(__u32 enctype,
-                         struct krb5_keyblock *kb,
-                         struct krb5_header *khdr,
-                         int msgcnt, rawobj_t *msgs,
-                         int iovcnt, lnet_kiov_t *iovs,
-                         rawobj_t *cksum)
+                        struct gss_keyblock *kb,
+                        struct krb5_header *khdr,
+                        int msgcnt, rawobj_t *msgs,
+                        int iovcnt, lnet_kiov_t *iovs,
+                        rawobj_t *cksum)
 {
         struct krb5_enctype   *ke = &enctypes[enctype];
-        struct ll_crypto_hash *tfm;
+       struct crypto_hash    *tfm;
+       rawobj_t               hdr;
         __u32                  code = GSS_S_FAILURE;
         int                    rc;
 
-        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+       if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                 return GSS_S_FAILURE;
         }
 
-        cksum->len = ll_crypto_hash_digestsize(tfm);
+       cksum->len = crypto_hash_digestsize(tfm);
         OBD_ALLOC_LARGE(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
                 goto out_tfm;
         }
 
+       hdr.data = (__u8 *)khdr;
+       hdr.len = sizeof(*khdr);
+
         if (ke->ke_hash_hmac)
-                rc = krb5_digest_hmac(tfm, &kb->kb_key,
-                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
+               rc = gss_digest_hmac(tfm, &kb->kb_key,
+                                    &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
         else
-                rc = krb5_digest_norm(tfm, kb,
-                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
+               rc = gss_digest_norm(tfm, kb,
+                                    &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
 
         if (rc == 0)
                 code = GSS_S_COMPLETE;
 out_tfm:
-        ll_crypto_free_hash(tfm);
+       crypto_free_hash(tfm);
         return code;
 }
 
@@ -773,9 +496,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx,
         }
 
         khdr->kh_filler = 0xff;
-        cfs_spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        cfs_spin_unlock(&krb5_seq_lock);
+       spin_lock(&krb5_seq_lock);
+       khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+       spin_unlock(&krb5_seq_lock);
 }
 
 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
@@ -835,7 +558,7 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
 
         /* fill krb5 header */
         LASSERT(token->len >= sizeof(*khdr));
-        khdr = (struct krb5_header *) token->data;
+       khdr = (struct krb5_header *)token->data;
         fill_krb5_header(kctx, khdr, 0);
 
         /* checksum */
@@ -872,7 +595,7 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                 return GSS_S_DEFECTIVE_TOKEN;
         }
 
-        khdr = (struct krb5_header *) token->data;
+       khdr = (struct krb5_header *)token->data;
 
         major = verify_krb5_header(kctx, khdr, 0);
         if (major != GSS_S_COMPLETE) {
@@ -904,87 +627,11 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
         return GSS_S_COMPLETE;
 }
 
-static
-int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
-{
-        int padding;
-
-        padding = (blocksize - (msg->len & (blocksize - 1))) &
-                  (blocksize - 1);
-        if (!padding)
-                return 0;
-
-        if (msg->len + padding > msg_buflen) {
-                CERROR("bufsize %u too small: datalen %u, padding %u\n",
-                        msg_buflen, msg->len, padding);
-                return -EINVAL;
-        }
-
-        memset(msg->data + msg->len, padding, padding);
-        msg->len += padding;
-        return 0;
-}
-
-static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
-                         int mode_ecb,
-                         int inobj_cnt,
-                         rawobj_t *inobjs,
-                         rawobj_t *outobj,
-                         int enc)
-{
-        struct blkcipher_desc desc;
-        struct scatterlist    src, dst;
-        __u8                  local_iv[16] = {0}, *buf;
-        __u32                 datalen = 0;
-        int                   i, rc;
-        ENTRY;
-
-        buf = outobj->data;
-        desc.tfm  = tfm;
-        desc.info = local_iv;
-        desc.flags = 0;
-
-        for (i = 0; i < inobj_cnt; i++) {
-                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
-
-                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
-                buf_to_sg(&dst, buf, outobj->len - datalen);
-
-                if (mode_ecb) {
-                        if (enc)
-                                rc = ll_crypto_blkcipher_encrypt(
-                                        &desc, &dst, &src, src.length);
-                        else
-                                rc = ll_crypto_blkcipher_decrypt(
-                                        &desc, &dst, &src, src.length);
-                } else {
-                        if (enc)
-                                rc = ll_crypto_blkcipher_encrypt_iv(
-                                        &desc, &dst, &src, src.length);
-                        else
-                                rc = ll_crypto_blkcipher_decrypt_iv(
-                                        &desc, &dst, &src, src.length);
-                }
-
-                if (rc) {
-                        CERROR("encrypt error %d\n", rc);
-                        RETURN(rc);
-                }
-
-                datalen += inobjs[i].len;
-                buf += inobjs[i].len;
-        }
-
-        outobj->len = datalen;
-        RETURN(0);
-}
-
 /*
  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
  */
 static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                       struct krb5_header *khdr,
                       char *confounder,
                       struct ptlrpc_bulk_desc *desc,
@@ -994,12 +641,14 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         struct blkcipher_desc   ciph_desc;
         __u8                    local_iv[16] = {0};
         struct scatterlist      src, dst;
+       struct sg_table         sg_src, sg_dst;
         int                     blocksize, i, rc, nob = 0;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
+       LASSERT(GET_ENC_KIOV(desc));
 
-        blocksize = ll_crypto_blkcipher_blocksize(tfm);
+       blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
         LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -1008,10 +657,22 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         ciph_desc.flags = 0;
 
         /* encrypt confounder */
-        buf_to_sg(&src, confounder, blocksize);
-        buf_to_sg(&dst, cipher->data, blocksize);
+       rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
+       if (rc != 0)
+               return rc;
+
+       rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
+       if (rc != 0) {
+               gss_teardown_sgtable(&sg_src);
+               return rc;
+       }
+
+       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
+                                        sg_src.sgl, blocksize);
+
+       gss_teardown_sgtable(&sg_dst);
+       gss_teardown_sgtable(&sg_src);
 
-        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to encrypt confounder: %d\n", rc);
                 return rc;
@@ -1019,22 +680,22 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 
         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                src.page = desc->bd_iov[i].kiov_page;
-                src.offset = desc->bd_iov[i].kiov_offset;
-                src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
-                             (~(blocksize - 1));
-
-                if (adj_nob)
-                        nob += src.length;
-
-                dst.page = desc->bd_enc_iov[i].kiov_page;
-                dst.offset = src.offset;
-                dst.length = src.length;
-
-                desc->bd_enc_iov[i].kiov_offset = dst.offset;
-                desc->bd_enc_iov[i].kiov_len = dst.length;
-
-                rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+               sg_init_table(&src, 1);
+               sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
+                           (BD_GET_KIOV(desc, i).kiov_len +
+                               blocksize - 1) &
+                           (~(blocksize - 1)),
+                           BD_GET_KIOV(desc, i).kiov_offset);
+               if (adj_nob)
+                       nob += src.length;
+               sg_init_table(&dst, 1);
+               sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                           src.length, src.offset);
+
+               BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
+               BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+
+               rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                     src.length);
                 if (rc) {
                         CERROR("error to encrypt page: %d\n", rc);
@@ -1043,11 +704,23 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
         }
 
         /* encrypt krb5 header */
-        buf_to_sg(&src, khdr, sizeof(*khdr));
-        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+       rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
+       if (rc != 0)
+               return rc;
+
+       rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
+                          sizeof(*khdr));
+       if (rc != 0) {
+               gss_teardown_sgtable(&sg_src);
+               return rc;
+       }
+
+       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
+                                        sizeof(*khdr));
+
+       gss_teardown_sgtable(&sg_dst);
+       gss_teardown_sgtable(&sg_src);
 
-        rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
-                                            &dst, &src, sizeof(*khdr));
         if (rc) {
                 CERROR("error to encrypt krb5 header: %d\n", rc);
                 return rc;
@@ -1067,16 +740,18 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
  * plain text size.
  * - for client read: we don't know data size for each page, so
  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
- *   be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len.
+ *   be smaller, so we need to adjust it according to
+ *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
  *   this means we DO NOT support the situation that server send an odd size
  *   data in a page which is not the last one.
  * - for server write: we knows exactly data size for each page being expected,
  *   thus kiov_len is accurate already, so we should not adjust it at all.
- *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
+ *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
+ *   round_up(bd_iov[]->kiov_len) which
  *   should have been done by prep_bulk().
  */
 static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                       struct krb5_header *khdr,
                       struct ptlrpc_bulk_desc *desc,
                       rawobj_t *cipher,
@@ -1086,14 +761,16 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
         struct blkcipher_desc   ciph_desc;
         __u8                    local_iv[16] = {0};
         struct scatterlist      src, dst;
+       struct sg_table         sg_src, sg_dst;
         int                     ct_nob = 0, pt_nob = 0;
         int                     blocksize, i, rc;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
+       LASSERT(GET_ENC_KIOV(desc));
         LASSERT(desc->bd_nob_transferred);
 
-        blocksize = ll_crypto_blkcipher_blocksize(tfm);
+       blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
         LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -1107,71 +784,92 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
         }
 
         /* decrypt head (confounder) */
-        buf_to_sg(&src, cipher->data, blocksize);
-        buf_to_sg(&dst, plain->data, blocksize);
+       rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
+       if (rc != 0)
+               return rc;
+
+       rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
+       if (rc != 0) {
+               gss_teardown_sgtable(&sg_src);
+               return rc;
+       }
+
+       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
+                                        sg_src.sgl, blocksize);
+
+       gss_teardown_sgtable(&sg_dst);
+       gss_teardown_sgtable(&sg_src);
 
-        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
         if (rc) {
                 CERROR("error to decrypt confounder: %d\n", rc);
                 return rc;
         }
 
-        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
-             i++) {
-                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
-                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
-                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-                               i, desc->bd_enc_iov[i].kiov_offset,
-                               desc->bd_enc_iov[i].kiov_len, blocksize);
-                        return -EFAULT;
-                }
-
-                if (adj_nob) {
-                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
-                            desc->bd_nob_transferred)
-                                desc->bd_enc_iov[i].kiov_len =
-                                        desc->bd_nob_transferred - ct_nob;
-
-                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
-                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
-                                desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
-                } else {
-                        /* this should be guaranteed by LNET */
-                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
-                                desc->bd_nob_transferred);
-                        LASSERT(desc->bd_iov[i].kiov_len <=
-                                desc->bd_enc_iov[i].kiov_len);
-                }
+       for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+            i++) {
+               if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
+                   != 0 ||
+                   BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
+                   != 0) {
+                       CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+                              i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
+                              BD_GET_ENC_KIOV(desc, i).kiov_len,
+                              blocksize);
+                       return -EFAULT;
+               }
 
-                if (desc->bd_enc_iov[i].kiov_len == 0)
-                        continue;
+               if (adj_nob) {
+                       if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                           desc->bd_nob_transferred)
+                               BD_GET_ENC_KIOV(desc, i).kiov_len =
+                                       desc->bd_nob_transferred - ct_nob;
+
+                       BD_GET_KIOV(desc, i).kiov_len =
+                         BD_GET_ENC_KIOV(desc, i).kiov_len;
+                       if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                           desc->bd_nob)
+                               BD_GET_KIOV(desc, i).kiov_len =
+                                 desc->bd_nob - pt_nob;
+               } else {
+                       /* this should be guaranteed by LNET */
+                       LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
+                               kiov_len <=
+                               desc->bd_nob_transferred);
+                       LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
+                               BD_GET_ENC_KIOV(desc, i).kiov_len);
+               }
 
-                src.page = desc->bd_enc_iov[i].kiov_page;
-                src.offset = desc->bd_enc_iov[i].kiov_offset;
-                src.length = desc->bd_enc_iov[i].kiov_len;
+               if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+                       continue;
 
-                dst = src;
-                if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                        dst.page = desc->bd_iov[i].kiov_page;
+               sg_init_table(&src, 1);
+               sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                           BD_GET_ENC_KIOV(desc, i).kiov_len,
+                           BD_GET_ENC_KIOV(desc, i).kiov_offset);
+               dst = src;
+               if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+                       sg_assign_page(&dst,
+                                      BD_GET_KIOV(desc, i).kiov_page);
 
-                rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-                                                    src.length);
+               rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+                                                src.length);
                 if (rc) {
                         CERROR("error to decrypt page: %d\n", rc);
                         return rc;
                 }
 
-                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
-                        memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
-                               desc->bd_iov[i].kiov_offset,
-                               cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
-                               desc->bd_iov[i].kiov_offset,
-                               desc->bd_iov[i].kiov_len);
-                }
+               if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
+                       memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
+                              BD_GET_KIOV(desc, i).kiov_offset,
+                              page_address(BD_GET_ENC_KIOV(desc, i).
+                                           kiov_page) +
+                              BD_GET_KIOV(desc, i).kiov_offset,
+                              BD_GET_KIOV(desc, i).kiov_len);
+               }
 
-                ct_nob += desc->bd_enc_iov[i].kiov_len;
-                pt_nob += desc->bd_iov[i].kiov_len;
-        }
+               ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
+               pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+       }
 
         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                 CERROR("%d cipher text transferred but only %d decrypted\n",
@@ -1185,17 +883,30 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 return -EFAULT;
         }
 
-        /* if needed, clear up the rest unused iovs */
-        if (adj_nob)
-                while (i < desc->bd_iov_count)
-                        desc->bd_iov[i++].kiov_len = 0;
+       /* if needed, clear up the rest unused iovs */
+       if (adj_nob)
+               while (i < desc->bd_iov_count)
+                       BD_GET_KIOV(desc, i++).kiov_len = 0;
 
         /* decrypt tail (krb5 header) */
-        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
-        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+       rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
+                              sizeof(*khdr));
+       if (rc != 0)
+               return rc;
+
+       rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
+                              sizeof(*khdr));
+       if (rc != 0) {
+               gss_teardown_sgtable(&sg_src);
+               return rc;
+       }
+
+       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
+                                        sizeof(*khdr));
+
+       gss_teardown_sgtable(&sg_src);
+       gss_teardown_sgtable(&sg_dst);
 
-        rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
-                                            &dst, &src, sizeof(*khdr));
         if (rc) {
                 CERROR("error to decrypt tail: %d\n", rc);
                 return rc;
@@ -1223,13 +934,14 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         rawobj_t             cksum = RAWOBJ_EMPTY;
         rawobj_t             data_desc[3], cipher;
         __u8                 conf[GSS_MAX_CIPHER_BLOCK];
+       __u8                 local_iv[16] = {0};
         int                  rc = 0;
 
         LASSERT(ke);
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
         LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                 ke->ke_conf_size >=
-                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+               crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 
         /*
          * final token format:
@@ -1240,7 +952,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 
         /* fill krb5 header */
         LASSERT(token->len >= sizeof(*khdr));
-        khdr = (struct krb5_header *) token->data;
+       khdr = (struct krb5_header *)token->data;
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
@@ -1253,13 +965,13 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(blocksize <= ke->ke_conf_size);
 
-        /* padding the message */
-        if (add_padding(msg, msg_buflen, blocksize))
-                return GSS_S_FAILURE;
+       /* padding the message */
+       if (gss_add_padding(msg, msg_buflen, blocksize))
+               return GSS_S_FAILURE;
 
         /*
          * clear text layout for checksum:
@@ -1294,44 +1006,44 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         data_desc[2].len = sizeof(*khdr);
 
         /* cipher text will be directly inplace */
-        cipher.data = (__u8 *) (khdr + 1);
+       cipher.data = (__u8 *)(khdr + 1);
         cipher.len = token->len - sizeof(*khdr);
         LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                 arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               rawobj_t                 arc4_keye;
+               struct crypto_blkcipher *arc4_tfm;
 
-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, rc = -EACCES);
-                }
+               if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                      NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                       CERROR("failed to obtain arc4 enc key\n");
+                       GOTO(arc4_out, rc = -EACCES);
+               }
 
-               arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+               if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                                arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
                         GOTO(arc4_out_tfm, rc = -EACCES);
                 }
 
-                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
-                                          3, data_desc, &cipher, 1);
+               rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
+                                      &cipher, 1);
 arc4_out_tfm:
-                ll_crypto_free_blkcipher(arc4_tfm);
+               crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
                 do {} while(0); /* just to avoid compile warning */
         } else {
-                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
-                                          3, data_desc, &cipher, 1);
+               rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
+                                      data_desc, &cipher, 1);
         }
 
         if (rc != 0) {
@@ -1353,35 +1065,38 @@ arc4_out:
 
 static
 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
-                             struct ptlrpc_bulk_desc *desc)
+                            struct ptlrpc_bulk_desc *desc)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        int                  blocksize, i;
-
-        LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
-        LASSERT(kctx->kc_keye.kb_tfm);
-
-        blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
-
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(desc->bd_enc_iov[i].kiov_page);
-                /*
-                 * offset should always start at page boundary of either
-                 * client or server side.
-                 */
-                if (desc->bd_iov[i].kiov_offset & blocksize) {
-                        CERROR("odd offset %d in page %d\n",
-                               desc->bd_iov[i].kiov_offset, i);
-                        return GSS_S_FAILURE;
-                }
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       int                  blocksize, i;
+
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+       LASSERT(desc->bd_iov_count);
+       LASSERT(GET_ENC_KIOV(desc));
+       LASSERT(kctx->kc_keye.kb_tfm);
+
+       blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+
+       for (i = 0; i < desc->bd_iov_count; i++) {
+               LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+               /*
+                * offset should always start at page boundary of either
+                * client or server side.
+                */
+               if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+                       CERROR("odd offset %d in page %d\n",
+                              BD_GET_KIOV(desc, i).kiov_offset, i);
+                       return GSS_S_FAILURE;
+               }
 
-                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
-                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
-                                                blocksize - 1) & (~(blocksize - 1));
-        }
+               BD_GET_ENC_KIOV(desc, i).kiov_offset =
+                       BD_GET_KIOV(desc, i).kiov_offset;
+               BD_GET_ENC_KIOV(desc, i).kiov_len =
+                       (BD_GET_KIOV(desc, i).kiov_len +
+                        blocksize - 1) & (~(blocksize - 1));
+       }
 
-        return GSS_S_COMPLETE;
+       return GSS_S_COMPLETE;
 }
 
 static
@@ -1398,6 +1113,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         __u8                 conf[GSS_MAX_CIPHER_BLOCK];
         int                  rc = 0;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(ke);
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
 
@@ -1410,7 +1126,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
 
         /* fill krb5 header */
         LASSERT(token->len >= sizeof(*khdr));
-        khdr = (struct krb5_header *) token->data;
+       khdr = (struct krb5_header *)token->data;
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
@@ -1423,7 +1139,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /*
@@ -1444,13 +1160,13 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = conf;
         data_desc[0].len = ke->ke_conf_size;
 
-        /* compute checksum */
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, data_desc,
-                               desc->bd_iov_count, desc->bd_iov,
-                               &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
+       /* compute checksum */
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                              khdr, 1, data_desc,
+                              desc->bd_iov_count, GET_KIOV(desc),
+                              &cksum))
+               return GSS_S_FAILURE;
+       LASSERT(cksum.len >= ke->ke_hash_size);
 
         /*
          * clear text layout for encryption:
@@ -1467,7 +1183,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = conf;
         data_desc[0].len = ke->ke_conf_size;
 
-        cipher.data = (__u8 *) (khdr + 1);
+       cipher.data = (__u8 *)(khdr + 1);
         cipher.len = blocksize + sizeof(*khdr);
 
         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
@@ -1511,6 +1227,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         rawobj_t             hash_objs[3];
         int                  rc = 0;
         __u32                major;
+       __u8                 local_iv[16] = {0};
 
         LASSERT(ke);
 
@@ -1519,7 +1236,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                 return GSS_S_DEFECTIVE_TOKEN;
         }
 
-        khdr = (struct krb5_header *) token->data;
+       khdr = (struct krb5_header *)token->data;
 
         major = verify_krb5_header(kctx, khdr, 1);
         if (major != GSS_S_COMPLETE) {
@@ -1533,7 +1250,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /* expected token layout:
@@ -1566,48 +1283,48 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 
         major = GSS_S_FAILURE;
 
-        cipher_in.data = (__u8 *) (khdr + 1);
+       cipher_in.data = (__u8 *)(khdr + 1);
         cipher_in.len = bodysize;
         plain_out.data = tmpbuf;
         plain_out.len = bodysize;
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                 arc4_keye;
-                struct ll_crypto_cipher *arc4_tfm;
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               rawobj_t                 arc4_keye;
+               struct crypto_blkcipher *arc4_tfm;
 
-                cksum.data = token->data + token->len - ke->ke_hash_size;
-                cksum.len = ke->ke_hash_size;
+               cksum.data = token->data + token->len - ke->ke_hash_size;
+               cksum.len = ke->ke_hash_size;
 
-                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
-                        CERROR("failed to obtain arc4 enc key\n");
-                        GOTO(arc4_out, rc = -EACCES);
-                }
+               if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+                                      NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                       CERROR("failed to obtain arc4 enc key\n");
+                       GOTO(arc4_out, rc = -EACCES);
+               }
 
-               arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-                if (ll_crypto_blkcipher_setkey(arc4_tfm,
+               if (crypto_blkcipher_setkey(arc4_tfm,
                                          arc4_keye.data, arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
                         GOTO(arc4_out_tfm, rc = -EACCES);
                 }
 
-                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
-                                          1, &cipher_in, &plain_out, 0);
+               rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
+                                      &plain_out, 0);
 arc4_out_tfm:
-                ll_crypto_free_blkcipher(arc4_tfm);
+               crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
                 cksum = RAWOBJ_EMPTY;
         } else {
-                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
-                                          1, &cipher_in, &plain_out, 0);
+               rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
+                                      &cipher_in, &plain_out, 0);
         }
 
         if (rc != 0) {
@@ -1677,6 +1394,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         int                  rc;
         __u32                major;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(ke);
 
         if (token->len < sizeof(*khdr)) {
@@ -1684,7 +1402,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                 return GSS_S_DEFECTIVE_TOKEN;
         }
 
-        khdr = (struct krb5_header *) token->data;
+       khdr = (struct krb5_header *)token->data;
 
         major = verify_krb5_header(kctx, khdr, 1);
         if (major != GSS_S_COMPLETE) {
@@ -1699,7 +1417,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                 LBUG();
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
 
@@ -1734,12 +1452,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = plain.data;
         data_desc[0].len = blocksize;
 
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, data_desc,
-                               desc->bd_iov_count, desc->bd_iov,
-                               &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                              khdr, 1, data_desc,
+                              desc->bd_iov_count,
+                              GET_KIOV(desc),
+                              &cksum))
+               return GSS_S_FAILURE;
+       LASSERT(cksum.len >= ke->ke_hash_size);
 
         if (memcmp(plain.data + blocksize + sizeof(*khdr),
                    cksum.data + cksum.len - ke->ke_hash_size,
@@ -1807,11 +1526,8 @@ static struct subflavor_desc gss_kerberos_sfs[] = {
         },
 };
 
-/*
- * currently we leave module owner NULL
- */
 static struct gss_api_mech gss_kerberos_mech = {
-        .gm_owner       = NULL, /*THIS_MODULE, */
+       /* .gm_owner uses default NULL value for THIS_MODULE */
         .gm_name        = "krb5",
         .gm_oid         = (rawobj_t)
                                 {9, "\052\206\110\206\367\022\001\002\002"},
@@ -1822,17 +1538,17 @@ static struct gss_api_mech gss_kerberos_mech = {
 
 int __init init_kerberos_module(void)
 {
-        int status;
+       int status;
 
-        cfs_spin_lock_init(&krb5_seq_lock);
+       spin_lock_init(&krb5_seq_lock);
 
-        status = lgss_mech_register(&gss_kerberos_mech);
-        if (status)
-                CERROR("Failed to register kerberos gss mechanism!\n");
-        return status;
+       status = lgss_mech_register(&gss_kerberos_mech);
+       if (status)
+               CERROR("Failed to register kerberos gss mechanism!\n");
+       return status;
 }
 
-void __exit cleanup_kerberos_module(void)
+void cleanup_kerberos_module(void)
 {
         lgss_mech_unregister(&gss_kerberos_mech);
 }
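
Beyond the initializer conversion, the patch drops the file-local helpers
(buf_to_sg(), get_bytes(), keyblock_init(), krb5_encrypt_rawobjs(), ...) in
favour of the shared routines from gss_crypto.h and moves from the ll_crypto_*
wrappers to the kernel crypto API. A rough sketch of the recurring pattern,
assuming the gss_setup_sgtable()/gss_teardown_sgtable() helpers declared in
gss_crypto.h (illustrative only, not a verbatim excerpt from the patch):

	/* old: map flat buffers straight into single scatterlist entries */
	buf_to_sg(&src, confounder, blocksize);
	buf_to_sg(&dst, cipher->data, blocksize);
	rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);

	/* new: wrap each buffer in an sg_table, pass its sgl, then tear down */
	rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
	if (rc == 0) {
		rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
		if (rc == 0) {
			rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
							 sg_src.sgl, blocksize);
			gss_teardown_sgtable(&sg_dst);
		}
		gss_teardown_sgtable(&sg_src);
	}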