LU-12401 gss: fix checksum for Kerberos and SSK 99/35099/7
author Sebastien Buisson <sbuisson@ddn.com>
Fri, 7 Jun 2019 14:45:26 +0000 (23:45 +0900)
committer Oleg Drokin <green@whamcloud.com>
Wed, 17 Jul 2019 06:21:40 +0000 (06:21 +0000)
When computing the checksum for Kerberos, the krb5 wire token header is
appended to the plain text. Make sure the actual header bytes, not the
rawobj_t descriptor that points at them, are what gss_digest_hash()
appends to the digest.
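
As an illustration, here is a minimal userspace sketch of the difference
(simplified types and a stand-in "hash" routine, not the Lustre kernel
code): the old code digested the rawobj_t descriptor itself, while the
fixed code digests the wire header bytes the descriptor points at.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel rawobj_t descriptor */
typedef struct {
	uint32_t len;
	uint8_t *data;
} rawobj_t;

/* pretend "hash update": just report which bytes would be digested */
static void hash_update(const void *buf, size_t len)
{
	printf("hashing %zu bytes at %p\n", len, (void *)buf);
}

int main(void)
{
	uint8_t wire_header[16] = { 0x05, 0x04 }; /* krb5 token header bytes */
	rawobj_t hdr = { .len = sizeof(wire_header), .data = wire_header };

	/* buggy: digests the descriptor (length + pointer), not the header */
	hash_update(&hdr, sizeof(hdr));

	/* fixed: digests the actual wire token header it points at */
	hash_update(hdr.data, hdr.len);
	return 0;
}
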
For interop with older clients, introduce a new server-side tunable
'sptlrpc.gss.krb5_allow_old_client_csum'. When it is not set, servers
refuse Kerberos connections from older clients that still send the old
checksum.
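
Roughly, the negotiation works as sketched below (a simplified userspace
model, not the actual server code): new clients set the
LUSTRE_GSS_PACK_KCSUM flag in the gss header, and the server only falls
back to accepting the old checksum when the tunable is enabled. On the
server, the tunable would be enabled with something like
'lctl set_param sptlrpc.gss.krb5_allow_old_client_csum=1', as the warning
message added in gss_svc_upcall.c also suggests.

#include <stdbool.h>
#include <stdio.h>

/* flag value matching the enum bit added by this patch */
#define LUSTRE_GSS_PACK_KCSUM 4

/* server-side tunable; defaults to 0 (refuse old checksums) */
static unsigned int krb5_allow_old_client_csum;

/* simplified decision: 'flags' are the gss header flags from the client */
static bool accept_krb5_client(unsigned int flags)
{
	if (flags & LUSTRE_GSS_PACK_KCSUM)
		return true;	/* client computes the fixed checksum */
	return krb5_allow_old_client_csum != 0;
}

int main(void)
{
	printf("new client accepted: %d\n",
	       accept_krb5_client(LUSTRE_GSS_PACK_KCSUM));
	printf("old client accepted: %d\n", accept_krb5_client(0));
	return 0;
}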

In gss_crypt_generic(), protect against undefined behavior when the input
and output buffers are the same (in-place encryption) by skipping the copy
in that case and switching from memcpy to memmove.
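
A minimal sketch of the issue (illustrative helper, not the kernel
function): memcpy() on overlapping regions is undefined behavior, and for
in-place encryption the input and output buffers are identical, so the
copy is skipped when they are the same and done with memmove() otherwise.

#include <stdio.h>
#include <string.h>

/* illustrative stand-in for the copy step in gss_crypt_generic() */
static void copy_for_crypt(void *out, const void *in, size_t length)
{
	/* memcpy(out, in, length) would be undefined behavior if the
	 * buffers overlap (e.g. in == out); memmove() is defined, and
	 * identical buffers need no copy at all */
	if (in != out)
		memmove(out, in, length);
}

int main(void)
{
	char buf[] = "plaintext";

	copy_for_crypt(buf, buf, sizeof(buf)); /* in-place: copy skipped */
	printf("%s\n", buf);
	return 0;
}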

When computing the checksum for SSK, make sure the finalized checksum is
stored in the actual token, not written back over the HMAC key.
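
A simplified sketch of that fix (toy types and a stand-in finalizer; the
real code calls cfs_crypto_hash_final() as seen in the gss_sk_mech.c hunk
below): the finalized HMAC has to land in the token handed back to the
caller, not be written over the HMAC key.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified buffer descriptor, standing in for the kernel rawobj_t */
typedef struct {
	uint32_t len;
	uint8_t *data;
} buf_t;

/* toy finalizer: writes the digest into the buffer it is given */
static void hash_final(buf_t *dst)
{
	memset(dst->data, 0xab, dst->len);
}

int main(void)
{
	uint8_t key_bytes[32] = {0};
	uint8_t token_bytes[32] = {0};
	buf_t key = { sizeof(key_bytes), key_bytes };
	buf_t token = { sizeof(token_bytes), token_bytes };

	/* the buggy variant finalized into 'key', clobbering the HMAC key
	 * and leaving the caller's token empty; the fix finalizes into
	 * the token that is actually returned to the caller */
	hash_final(&token);

	printf("token[0]=0x%02x key[0]=0x%02x\n", token.data[0], key.data[0]);
	return 0;
}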

Fixes: a21c13d4df ("LU-8602 gss: Properly port gss to newer crypto api.")
Test-Parameters: envdefinitions=SHARED_KEY=true testlist=sanity,recovery-small,sanity-sec
Test-Parameters: envdefinitions=SHARED_KEY=true clientbuildno=6308 clientjob=lustre-reviews-patchless testlist=sanity,recovery-small,sanity-sec
Signed-off-by: Sebastien Buisson <sbuisson@ddn.com>
Change-Id: I0233ada481f132af112bf88c065f5421902c942e
Reviewed-on: https://review.whamcloud.com/35099
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Jeremy Filizetti <jeremy.filizetti@gmail.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
12 files changed:
lustre/include/lustre_sec.h
lustre/ptlrpc/gss/gss_api.h
lustre/ptlrpc/gss/gss_cli_upcall.c
lustre/ptlrpc/gss/gss_crypto.c
lustre/ptlrpc/gss/gss_crypto.h
lustre/ptlrpc/gss/gss_internal.h
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_mech_switch.c
lustre/ptlrpc/gss/gss_sk_mech.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/gss/lproc_gss.c
lustre/ptlrpc/sec_lproc.c

index b7bd9e6..ee2204f 100644
@@ -966,6 +966,7 @@ struct ptlrpc_bulk_sec_desc {
 };
 
 extern struct dentry *sptlrpc_debugfs_dir;
+extern struct proc_dir_entry *sptlrpc_lprocfs_dir;
 
 /*
  * round size up to next power of 2, for slab allocation.
index bf41678..a5f203e 100644
 
 struct gss_api_mech;
 
+typedef int (*digest_hash)(
+       struct ahash_request *req, rawobj_t *hdr,
+       int msgcnt, rawobj_t *msgs,
+       int iovcnt, lnet_kiov_t *iovs);
+
 /* The mechanism-independent gss-api context: */
 struct gss_ctx {
-        struct gss_api_mech    *mech_type;
-        void                   *internal_ctx_id;
+       struct gss_api_mech *mech_type;
+       void *internal_ctx_id;
+       digest_hash hash_func;
 };
 
 #define GSS_C_NO_BUFFER         ((rawobj_t) 0)
index 9ae9d8f..70d4711 100644
 
 static
 int ctx_init_pack_request(struct obd_import *imp,
-                          struct ptlrpc_request *req,
-                          int lustre_srv,
-                          uid_t uid, gid_t gid,
-                          long token_size,
-                          char __user *token)
+                         struct ptlrpc_request *req,
+                         int lustre_srv,
+                         uid_t uid, gid_t gid,
+                         long token_size,
+                         char __user *token)
 {
-        struct lustre_msg       *msg = req->rq_reqbuf;
-        struct gss_sec          *gsec;
-        struct gss_header       *ghdr;
-        struct ptlrpc_user_desc *pud;
-        __u32                   *p, size, offset = 2;
-        rawobj_t                 obj;
-
-        LASSERT(msg->lm_bufcount <= 4);
-        LASSERT(req->rq_cli_ctx);
-        LASSERT(req->rq_cli_ctx->cc_sec);
-
-        /* gss hdr */
-        ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));
-        ghdr->gh_version = PTLRPC_GSS_VERSION;
-        ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
-        ghdr->gh_flags = 0;
-        ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
-        ghdr->gh_seq = 0;
-        ghdr->gh_svc = SPTLRPC_SVC_NULL;
-        ghdr->gh_handle.len = 0;
-
-        /* fix the user desc */
-        if (req->rq_pack_udesc) {
-                ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
-
-                pud = lustre_msg_buf(msg, offset, sizeof(*pud));
-                LASSERT(pud);
-                pud->pud_uid = pud->pud_fsuid = uid;
-                pud->pud_gid = pud->pud_fsgid = gid;
-                pud->pud_cap = 0;
-                pud->pud_ngroups = 0;
-                offset++;
-        }
+       struct lustre_msg       *msg = req->rq_reqbuf;
+       struct gss_sec          *gsec;
+       struct gss_header       *ghdr;
+       struct ptlrpc_user_desc *pud;
+       __u32                   *p, size, offset = 2;
+       rawobj_t                 obj;
+
+       LASSERT(msg->lm_bufcount <= 4);
+       LASSERT(req->rq_cli_ctx);
+       LASSERT(req->rq_cli_ctx->cc_sec);
+
+       /* gss hdr */
+       ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));
+       ghdr->gh_version = PTLRPC_GSS_VERSION;
+       ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
+       ghdr->gh_flags = 0;
+       ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
+       ghdr->gh_seq = 0;
+       ghdr->gh_svc = SPTLRPC_SVC_NULL;
+       ghdr->gh_handle.len = 0;
+
+       /* fix the user desc */
+       if (req->rq_pack_udesc) {
+               ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
+
+               pud = lustre_msg_buf(msg, offset, sizeof(*pud));
+               LASSERT(pud);
+               pud->pud_uid = pud->pud_fsuid = uid;
+               pud->pud_gid = pud->pud_fsgid = gid;
+               pud->pud_cap = 0;
+               pud->pud_ngroups = 0;
+               offset++;
+       }
 
-        /* security payload */
-        p = lustre_msg_buf(msg, offset, 0);
-        size = msg->lm_buflens[offset];
-        LASSERT(p);
-
-        /* 1. lustre svc type */
-        LASSERT(size > 4);
-        *p++ = cpu_to_le32(lustre_srv);
-        size -= 4;
-
-        /* 2. target uuid */
-        obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
-        obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
-        if (rawobj_serialize(&obj, &p, &size))
-                LBUG();
-
-        /* 3. reverse context handle. actually only needed by root user,
-         *    but we send it anyway. */
-        gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
-        obj.len = sizeof(gsec->gs_rvs_hdl);
-        obj.data = (__u8 *) &gsec->gs_rvs_hdl;
-        if (rawobj_serialize(&obj, &p, &size))
-                LBUG();
-
-        /* 4. now the token */
-        LASSERT(size >= (sizeof(__u32) + token_size));
-        *p++ = cpu_to_le32(((__u32) token_size));
+       /* new clients are expected to set KCSUM flag */
+       ghdr->gh_flags |= LUSTRE_GSS_PACK_KCSUM;
+
+       /* security payload */
+       p = lustre_msg_buf(msg, offset, 0);
+       size = msg->lm_buflens[offset];
+       LASSERT(p);
+
+       /* 1. lustre svc type */
+       LASSERT(size > 4);
+       *p++ = cpu_to_le32(lustre_srv);
+       size -= 4;
+
+       /* 2. target uuid */
+       obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
+       obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
+       if (rawobj_serialize(&obj, &p, &size))
+               LBUG();
+
+       /* 3. reverse context handle. actually only needed by root user,
+        *    but we send it anyway. */
+       gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
+       obj.len = sizeof(gsec->gs_rvs_hdl);
+       obj.data = (__u8 *) &gsec->gs_rvs_hdl;
+       if (rawobj_serialize(&obj, &p, &size))
+               LBUG();
+
+       /* 4. now the token */
+       LASSERT(size >= (sizeof(__u32) + token_size));
+       *p++ = cpu_to_le32(((__u32) token_size));
        if (copy_from_user(p, token, token_size)) {
-                CERROR("can't copy token\n");
-                return -EFAULT;
-        }
-        size -= sizeof(__u32) + cfs_size_round4(token_size);
+               CERROR("can't copy token\n");
+               return -EFAULT;
+       }
+       size -= sizeof(__u32) + cfs_size_round4(token_size);
 
-        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
-                                                msg->lm_buflens[offset] - size, 0);
-        return 0;
+       req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
+                                            msg->lm_buflens[offset] - size, 0);
+       return 0;
 }
 
 static
index 4231a0a..5aebc48 100644
@@ -254,7 +254,8 @@ int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
        if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
-       memcpy(out, in, length);
+       if (in != out)
+               memmove(out, in, length);
 
        ret = gss_setup_sgtable(&sg_out, &sg, out, length);
        if (ret != 0)
@@ -272,8 +273,7 @@ out:
 
 int gss_digest_hash(struct ahash_request *req,
                    rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
-                   int iovcnt, lnet_kiov_t *iovs,
-                   rawobj_t *cksum)
+                   int iovcnt, lnet_kiov_t *iovs)
 {
        struct scatterlist sg[1];
        struct sg_table sgt;
@@ -310,7 +310,7 @@ int gss_digest_hash(struct ahash_request *req,
        }
 
        if (hdr) {
-               rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
+               rc = gss_setup_sgtable(&sgt, sg, hdr->data, hdr->len);
                if (rc)
                        return rc;
 
@@ -324,6 +324,59 @@ int gss_digest_hash(struct ahash_request *req,
        return rc;
 }
 
+int gss_digest_hash_compat(struct ahash_request *req,
+                          rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
+                          int iovcnt, lnet_kiov_t *iovs)
+{
+       struct scatterlist sg[1];
+       struct sg_table sgt;
+       int rc = 0;
+       int i;
+
+       for (i = 0; i < msgcnt; i++) {
+               if (msgs[i].len == 0)
+                       continue;
+
+               rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+               if (rc)
+                       return rc;
+
+               ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+               rc = crypto_ahash_update(req);
+               gss_teardown_sgtable(&sgt);
+               if (rc)
+                       return rc;
+       }
+
+       for (i = 0; i < iovcnt; i++) {
+               if (iovs[i].kiov_len == 0)
+                       continue;
+
+               sg_init_table(sg, 1);
+               sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+                           iovs[i].kiov_offset);
+
+               ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+               rc = crypto_ahash_update(req);
+               if (rc)
+                       return rc;
+       }
+
+       if (hdr) {
+               rc = gss_setup_sgtable(&sgt, sg, &(hdr->len), sizeof(hdr->len));
+               if (rc)
+                       return rc;
+
+               ahash_request_set_crypt(req, sg, NULL, sizeof(hdr->len));
+               rc = crypto_ahash_update(req);
+               gss_teardown_sgtable(&sgt);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
 int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 {
        int padding;
index 98950b1..39a2b4e 100644
@@ -24,8 +24,10 @@ void gss_teardown_sgtable(struct sg_table *sgt);
 int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
                      const void *in, void *out, size_t length);
 int gss_digest_hash(struct ahash_request *req, rawobj_t *hdr,
-                   int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs,
-                   rawobj_t *cksum);
+                   int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs);
+int gss_digest_hash_compat(struct ahash_request *req,
+                          rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
+                          int iovcnt, lnet_kiov_t *iovs);
 int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
 int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
                      int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
index bf7966c..3a7a9d0 100644
@@ -117,8 +117,9 @@ enum ptlrpc_gss_tgt {
 };
 
 enum ptlrpc_gss_header_flags {
-        LUSTRE_GSS_PACK_BULK            = 1,
-        LUSTRE_GSS_PACK_USER            = 2,
+       LUSTRE_GSS_PACK_BULK            = 1,
+       LUSTRE_GSS_PACK_USER            = 2,
+       LUSTRE_GSS_PACK_KCSUM           = 4,
 };
 
 static inline
@@ -509,6 +510,7 @@ void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx);
 
 int  __init gss_init_svc_upcall(void);
 void gss_exit_svc_upcall(void);
+extern unsigned int krb5_allow_old_client_csum;
 
 /* lproc_gss.c */
 void gss_stat_oos_record_cli(int behind);
index 39ef0c2..e2ce12f 100644
@@ -446,7 +446,8 @@ __s32 krb5_make_checksum(__u32 enctype,
                         struct krb5_header *khdr,
                         int msgcnt, rawobj_t *msgs,
                         int iovcnt, lnet_kiov_t *iovs,
-                        rawobj_t *cksum)
+                        rawobj_t *cksum,
+                        digest_hash hash_func)
 {
        struct krb5_enctype *ke = &enctypes[enctype];
        struct ahash_request *req = NULL;
@@ -480,8 +481,13 @@ __s32 krb5_make_checksum(__u32 enctype,
        hdr.data = (__u8 *)khdr;
        hdr.len = sizeof(*khdr);
 
-       rc = gss_digest_hash(req, &hdr, msgcnt, msgs,
-                            iovcnt, iovs, cksum);
+       if (!hash_func) {
+               rc = -EPROTO;
+               CERROR("hash function for %s undefined\n",
+                      ke->ke_hash_name);
+               goto out_free_hash;
+       }
+       rc = hash_func(req, &hdr, msgcnt, msgs, iovcnt, iovs);
        if (rc)
                goto out_free_hash;
 
@@ -572,32 +578,33 @@ static __u32 verify_krb5_header(struct krb5_ctx *kctx,
 
 static
 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
-                           int msgcnt,
-                           rawobj_t *msgs,
-                           int iovcnt,
-                           lnet_kiov_t *iovs,
-                           rawobj_t *token)
+                          int msgcnt,
+                          rawobj_t *msgs,
+                          int iovcnt,
+                          lnet_kiov_t *iovs,
+                          rawobj_t *token)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
-        struct krb5_header  *khdr;
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+       struct krb5_header  *khdr;
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;
 
-        /* fill krb5 header */
-        LASSERT(token->len >= sizeof(*khdr));
+       /* fill krb5 header */
+       LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
-        fill_krb5_header(kctx, khdr, 0);
+       fill_krb5_header(kctx, khdr, 0);
 
-        /* checksum */
+       /* checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
-                              msgcnt, msgs, iovcnt, iovs, &cksum))
+                              msgcnt, msgs, iovcnt, iovs, &cksum,
+                              gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
 
-        LASSERT(cksum.len >= ke->ke_hash_size);
-        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
-        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
-               ke->ke_hash_size);
+       LASSERT(cksum.len >= ke->ke_hash_size);
+       LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
+       memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
+              ke->ke_hash_size);
 
        token->len = sizeof(*khdr) + ke->ke_hash_size;
        major = GSS_S_COMPLETE;
@@ -608,11 +615,11 @@ out_free_cksum:
 
 static
 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
-                              int msgcnt,
-                              rawobj_t *msgs,
-                              int iovcnt,
-                              lnet_kiov_t *iovs,
-                              rawobj_t *token)
+                             int msgcnt,
+                             rawobj_t *msgs,
+                             int iovcnt,
+                             lnet_kiov_t *iovs,
+                             rawobj_t *token)
 {
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
@@ -620,35 +627,34 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
        rawobj_t cksum = RAWOBJ_EMPTY;
        u32 major;
 
-        if (token->len < sizeof(*khdr)) {
-                CERROR("short signature: %u\n", token->len);
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
+       if (token->len < sizeof(*khdr)) {
+               CERROR("short signature: %u\n", token->len);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
 
        khdr = (struct krb5_header *)token->data;
 
-        major = verify_krb5_header(kctx, khdr, 0);
-        if (major != GSS_S_COMPLETE) {
-                CERROR("bad krb5 header\n");
+       major = verify_krb5_header(kctx, khdr, 0);
+       if (major != GSS_S_COMPLETE) {
+               CERROR("bad krb5 header\n");
                goto out;
-        }
+       }
 
-        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
-                CERROR("short signature: %u, require %d\n",
-                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
+       if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
+               CERROR("short signature: %u, require %d\n",
+                      token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                GOTO(out, major = GSS_S_FAILURE);
-        }
+       }
 
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
-                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
-                CERROR("failed to make checksum\n");
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
+                              khdr, msgcnt, msgs, iovcnt, iovs, &cksum,
+                              gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
-        }
 
-        LASSERT(cksum.len >= ke->ke_hash_size);
-        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
-                   ke->ke_hash_size)) {
-                CERROR("checksum mismatch\n");
+       LASSERT(cksum.len >= ke->ke_hash_size);
+       if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
+                  ke->ke_hash_size)) {
+               CERROR("checksum mismatch\n");
                GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
        }
        major = GSS_S_COMPLETE;
@@ -953,101 +959,103 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 
 static
 __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
-                        rawobj_t *gsshdr,
-                        rawobj_t *msg,
-                        int msg_buflen,
-                        rawobj_t *token)
+                       rawobj_t *gsshdr,
+                       rawobj_t *msg,
+                       int msg_buflen,
+                       rawobj_t *token)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
-        struct krb5_header  *khdr;
-        int                  blocksize;
-        rawobj_t             cksum = RAWOBJ_EMPTY;
-        rawobj_t             data_desc[3], cipher;
-        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+       struct krb5_header  *khdr;
+       int                  blocksize;
+       rawobj_t             cksum = RAWOBJ_EMPTY;
+       rawobj_t             data_desc[3], cipher;
+       __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        __u8                 local_iv[16] = {0};
        u32 major;
-        int                  rc = 0;
+       int                  rc = 0;
 
-        LASSERT(ke);
-        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
-        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
-                ke->ke_conf_size >=
+       LASSERT(ke);
+       LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+       LASSERT(kctx->kc_keye.kb_tfm == NULL ||
+               ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 
-        /*
-         * final token format:
-         * ---------------------------------------------------
-         * | krb5 header | cipher text | checksum (16 bytes) |
-         * ---------------------------------------------------
-         */
+       /*
+        * final token format:
+        * ---------------------------------------------------
+        * | krb5 header | cipher text | checksum (16 bytes) |
+        * ---------------------------------------------------
+        */
 
-        /* fill krb5 header */
-        LASSERT(token->len >= sizeof(*khdr));
+       /* fill krb5 header */
+       LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
-        fill_krb5_header(kctx, khdr, 1);
+       fill_krb5_header(kctx, khdr, 1);
 
-        /* generate confounder */
+       /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);
 
-        /* get encryption blocksize. note kc_keye might not associated with
-         * a tfm, currently only for arcfour-hmac */
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-                blocksize = 1;
-        } else {
-                LASSERT(kctx->kc_keye.kb_tfm);
+       /* get encryption blocksize. note kc_keye might not associated with
+        * a tfm, currently only for arcfour-hmac */
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               LASSERT(kctx->kc_keye.kb_tfm == NULL);
+               blocksize = 1;
+       } else {
+               LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
-        }
-        LASSERT(blocksize <= ke->ke_conf_size);
+       }
+       LASSERT(blocksize <= ke->ke_conf_size);
 
        /* padding the message */
        if (gss_add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;
 
-        /*
-         * clear text layout for checksum:
-         * ------------------------------------------------------
-         * | confounder | gss header | clear msgs | krb5 header |
-         * ------------------------------------------------------
-         */
-        data_desc[0].data = conf;
-        data_desc[0].len = ke->ke_conf_size;
-        data_desc[1].data = gsshdr->data;
-        data_desc[1].len = gsshdr->len;
-        data_desc[2].data = msg->data;
-        data_desc[2].len = msg->len;
-
-        /* compute checksum */
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 3, data_desc, 0, NULL, &cksum))
+       /*
+        * clear text layout for checksum:
+        * ------------------------------------------------------
+        * | confounder | gss header | clear msgs | krb5 header |
+        * ------------------------------------------------------
+        */
+       data_desc[0].data = conf;
+       data_desc[0].len = ke->ke_conf_size;
+       data_desc[1].data = gsshdr->data;
+       data_desc[1].len = gsshdr->len;
+       data_desc[2].data = msg->data;
+       data_desc[2].len = msg->len;
+
+       /* compute checksum */
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                              khdr, 3, data_desc, 0, NULL, &cksum,
+                              gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
-        LASSERT(cksum.len >= ke->ke_hash_size);
-
-        /*
-         * clear text layout for encryption:
-         * -----------------------------------------
-         * | confounder | clear msgs | krb5 header |
-         * -----------------------------------------
-         */
-        data_desc[0].data = conf;
-        data_desc[0].len = ke->ke_conf_size;
-        data_desc[1].data = msg->data;
-        data_desc[1].len = msg->len;
-        data_desc[2].data = (__u8 *) khdr;
-        data_desc[2].len = sizeof(*khdr);
-
-        /* cipher text will be directly inplace */
+       LASSERT(cksum.len >= ke->ke_hash_size);
+
+       /*
+        * clear text layout for encryption:
+        * -----------------------------------------
+        * | confounder | clear msgs | krb5 header |
+        * -----------------------------------------
+        */
+       data_desc[0].data = conf;
+       data_desc[0].len = ke->ke_conf_size;
+       data_desc[1].data = msg->data;
+       data_desc[1].len = msg->len;
+       data_desc[2].data = (__u8 *) khdr;
+       data_desc[2].len = sizeof(*khdr);
+
+       /* cipher text will be directly inplace */
        cipher.data = (__u8 *)(khdr + 1);
-        cipher.len = token->len - sizeof(*khdr);
-        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
+       cipher.len = token->len - sizeof(*khdr);
+       LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
                struct crypto_blkcipher *arc4_tfm;
 
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                      NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                                      NULL, 1, &cksum, 0, NULL, &arc4_keye,
+                                      gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
@@ -1059,11 +1067,11 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                }
 
                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
-                                               arc4_keye.len)) {
-                        CERROR("failed to set arc4 key, len %d\n",
-                               arc4_keye.len);
-                        GOTO(arc4_out_tfm, rc = -EACCES);
-                }
+                                           arc4_keye.len)) {
+                       CERROR("failed to set arc4 key, len %d\n",
+                              arc4_keye.len);
+                       GOTO(arc4_out_tfm, rc = -EACCES);
+               }
 
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
@@ -1071,19 +1079,19 @@ arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
-        } else {
+       } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
                                       data_desc, &cipher, 1);
-        }
+       }
 
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
 
-        /* fill in checksum */
-        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
-        memcpy((char *)(khdr + 1) + cipher.len,
-               cksum.data + cksum.len - ke->ke_hash_size,
-               ke->ke_hash_size);
+       /* fill in checksum */
+       LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+       memcpy((char *)(khdr + 1) + cipher.len,
+              cksum.data + cksum.len - ke->ke_hash_size,
+              ke->ke_hash_size);
 
        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
@@ -1131,107 +1139,107 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
 
 static
 __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
-                             struct ptlrpc_bulk_desc *desc,
-                             rawobj_t *token, int adj_nob)
+                            struct ptlrpc_bulk_desc *desc,
+                            rawobj_t *token, int adj_nob)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
-        struct krb5_header  *khdr;
-        int                  blocksize;
-        rawobj_t             cksum = RAWOBJ_EMPTY;
-        rawobj_t             data_desc[1], cipher;
-        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+       struct krb5_header  *khdr;
+       int                  blocksize;
+       rawobj_t             cksum = RAWOBJ_EMPTY;
+       rawobj_t             data_desc[1], cipher;
+       __u8                 conf[GSS_MAX_CIPHER_BLOCK];
        int rc = 0;
        u32 major;
 
        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-        LASSERT(ke);
-        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
-
-        /*
-         * final token format:
-         * --------------------------------------------------
-         * | krb5 header | head/tail cipher text | checksum |
-         * --------------------------------------------------
-         */
-
-        /* fill krb5 header */
-        LASSERT(token->len >= sizeof(*khdr));
+       LASSERT(ke);
+       LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+
+       /*
+        * final token format:
+        * --------------------------------------------------
+        * | krb5 header | head/tail cipher text | checksum |
+        * --------------------------------------------------
+        */
+
+       /* fill krb5 header */
+       LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *)token->data;
-        fill_krb5_header(kctx, khdr, 1);
+       fill_krb5_header(kctx, khdr, 1);
 
-        /* generate confounder */
+       /* generate confounder */
        get_random_bytes(conf, ke->ke_conf_size);
 
-        /* get encryption blocksize. note kc_keye might not associated with
-         * a tfm, currently only for arcfour-hmac */
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-                blocksize = 1;
-        } else {
-                LASSERT(kctx->kc_keye.kb_tfm);
+       /* get encryption blocksize. note kc_keye might not associated with
+        * a tfm, currently only for arcfour-hmac */
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               LASSERT(kctx->kc_keye.kb_tfm == NULL);
+               blocksize = 1;
+       } else {
+               LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
-        }
+       }
 
-        /*
-         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
-         * the bulk token size would be exactly (sizeof(krb5_header) +
-         * blocksize + sizeof(krb5_header) + hashsize)
-         */
-        LASSERT(blocksize <= ke->ke_conf_size);
-        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
-        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
-
-        /*
-         * clear text layout for checksum:
-         * ------------------------------------------
-         * | confounder | clear pages | krb5 header |
-         * ------------------------------------------
-         */
-        data_desc[0].data = conf;
-        data_desc[0].len = ke->ke_conf_size;
+       /*
+        * we assume the size of krb5_header (16 bytes) must be n * blocksize.
+        * the bulk token size would be exactly (sizeof(krb5_header) +
+        * blocksize + sizeof(krb5_header) + hashsize)
+        */
+       LASSERT(blocksize <= ke->ke_conf_size);
+       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+       LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+
+       /*
+        * clear text layout for checksum:
+        * ------------------------------------------
+        * | confounder | clear pages | krb5 header |
+        * ------------------------------------------
+        */
+       data_desc[0].data = conf;
+       data_desc[0].len = ke->ke_conf_size;
 
        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, GET_KIOV(desc),
-                              &cksum))
+                              &cksum, gctx->hash_func))
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
        LASSERT(cksum.len >= ke->ke_hash_size);
 
-        /*
-         * clear text layout for encryption:
-         * ------------------------------------------
-         * | confounder | clear pages | krb5 header |
-         * ------------------------------------------
-         *        |              |             |
-         *        ----------  (cipher pages)   |
-         * result token:   |                   |
-         * -------------------------------------------
-         * | krb5 header | cipher text | cipher text |
-         * -------------------------------------------
-         */
-        data_desc[0].data = conf;
-        data_desc[0].len = ke->ke_conf_size;
+       /*
+        * clear text layout for encryption:
+        * ------------------------------------------
+        * | confounder | clear pages | krb5 header |
+        * ------------------------------------------
+        *        |              |             |
+        *        ----------  (cipher pages)   |
+        * result token:   |                   |
+        * -------------------------------------------
+        * | krb5 header | cipher text | cipher text |
+        * -------------------------------------------
+        */
+       data_desc[0].data = conf;
+       data_desc[0].len = ke->ke_conf_size;
 
        cipher.data = (__u8 *)(khdr + 1);
-        cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksize + sizeof(*khdr);
 
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                LBUG();
-                rc = 0;
-        } else {
-                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
-                                       conf, desc, &cipher, adj_nob);
-        }
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               LBUG();
+               rc = 0;
+       } else {
+               rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+                                      conf, desc, &cipher, adj_nob);
+       }
        if (rc)
                GOTO(out_free_cksum, major = GSS_S_FAILURE);
 
-        /* fill in checksum */
-        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
-        memcpy((char *)(khdr + 1) + cipher.len,
-               cksum.data + cksum.len - ke->ke_hash_size,
-               ke->ke_hash_size);
+       /* fill in checksum */
+       LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+       memcpy((char *)(khdr + 1) + cipher.len,
+              cksum.data + cksum.len - ke->ke_hash_size,
+              ke->ke_hash_size);
 
        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
@@ -1243,80 +1251,80 @@ out_free_cksum:
 
 static
 __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
-                          rawobj_t        *gsshdr,
-                          rawobj_t        *token,
-                          rawobj_t        *msg)
+                         rawobj_t        *gsshdr,
+                         rawobj_t        *token,
+                         rawobj_t        *msg)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
-        struct krb5_header  *khdr;
-        unsigned char       *tmpbuf;
-        int                  blocksize, bodysize;
-        rawobj_t             cksum = RAWOBJ_EMPTY;
-        rawobj_t             cipher_in, plain_out;
-        rawobj_t             hash_objs[3];
-        int                  rc = 0;
-        __u32                major;
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+       struct krb5_header  *khdr;
+       unsigned char       *tmpbuf;
+       int                  blocksize, bodysize;
+       rawobj_t             cksum = RAWOBJ_EMPTY;
+       rawobj_t             cipher_in, plain_out;
+       rawobj_t             hash_objs[3];
+       int                  rc = 0;
+       __u32                major;
        __u8                 local_iv[16] = {0};
 
-        LASSERT(ke);
+       LASSERT(ke);
 
-        if (token->len < sizeof(*khdr)) {
-                CERROR("short signature: %u\n", token->len);
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
+       if (token->len < sizeof(*khdr)) {
+               CERROR("short signature: %u\n", token->len);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
 
        khdr = (struct krb5_header *)token->data;
 
-        major = verify_krb5_header(kctx, khdr, 1);
-        if (major != GSS_S_COMPLETE) {
-                CERROR("bad krb5 header\n");
-                return major;
-        }
+       major = verify_krb5_header(kctx, khdr, 1);
+       if (major != GSS_S_COMPLETE) {
+               CERROR("bad krb5 header\n");
+               return major;
+       }
 
-        /* block size */
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-                blocksize = 1;
-        } else {
-                LASSERT(kctx->kc_keye.kb_tfm);
+       /* block size */
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               LASSERT(kctx->kc_keye.kb_tfm == NULL);
+               blocksize = 1;
+       } else {
+               LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
-        }
+       }
 
-        /* expected token layout:
-         * ----------------------------------------
-         * | krb5 header | cipher text | checksum |
-         * ----------------------------------------
-         */
-        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
+       /* expected token layout:
+        * ----------------------------------------
+        * | krb5 header | cipher text | checksum |
+        * ----------------------------------------
+        */
+       bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
 
-        if (bodysize % blocksize) {
-                CERROR("odd bodysize %d\n", bodysize);
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
+       if (bodysize % blocksize) {
+               CERROR("odd bodysize %d\n", bodysize);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
 
-        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
-                CERROR("incomplete token: bodysize %d\n", bodysize);
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
+       if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
+               CERROR("incomplete token: bodysize %d\n", bodysize);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
 
-        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
-                CERROR("buffer too small: %u, require %d\n",
-                       msg->len, bodysize - ke->ke_conf_size);
-                return GSS_S_FAILURE;
-        }
+       if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
+               CERROR("buffer too small: %u, require %d\n",
+                      msg->len, bodysize - ke->ke_conf_size);
+               return GSS_S_FAILURE;
+       }
 
-        /* decrypting */
-        OBD_ALLOC_LARGE(tmpbuf, bodysize);
-        if (!tmpbuf)
-                return GSS_S_FAILURE;
+       /* decrypting */
+       OBD_ALLOC_LARGE(tmpbuf, bodysize);
+       if (!tmpbuf)
+               return GSS_S_FAILURE;
 
-        major = GSS_S_FAILURE;
+       major = GSS_S_FAILURE;
 
        cipher_in.data = (__u8 *)(khdr + 1);
-        cipher_in.len = bodysize;
-        plain_out.data = tmpbuf;
-        plain_out.len = bodysize;
+       cipher_in.len = bodysize;
+       plain_out.data = tmpbuf;
+       plain_out.len = bodysize;
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
@@ -1326,7 +1334,8 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                cksum.len = ke->ke_hash_size;
 
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
-                                      NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+                                      NULL, 1, &cksum, 0, NULL, &arc4_keye,
+                                      gctx->hash_func)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }
@@ -1338,168 +1347,169 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                }
 
                if (crypto_blkcipher_setkey(arc4_tfm,
-                                         arc4_keye.data, arc4_keye.len)) {
-                        CERROR("failed to set arc4 key, len %d\n",
-                               arc4_keye.len);
-                        GOTO(arc4_out_tfm, rc = -EACCES);
-                }
+                                           arc4_keye.data, arc4_keye.len)) {
+                       CERROR("failed to set arc4 key, len %d\n",
+                              arc4_keye.len);
+                       GOTO(arc4_out_tfm, rc = -EACCES);
+               }
 
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
 arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
-                rawobj_free(&arc4_keye);
+               rawobj_free(&arc4_keye);
 arc4_out:
-                cksum = RAWOBJ_EMPTY;
-        } else {
+               cksum = RAWOBJ_EMPTY;
+       } else {
                rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1,
                                       &cipher_in, &plain_out, 0);
-        }
+       }
 
-        if (rc != 0) {
-                CERROR("error decrypt\n");
-                goto out_free;
-        }
-        LASSERT(plain_out.len == bodysize);
-
-        /* expected clear text layout:
-         * -----------------------------------------
-         * | confounder | clear msgs | krb5 header |
-         * -----------------------------------------
-         */
-
-        /* verify krb5 header in token is not modified */
-        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
-                   sizeof(*khdr))) {
-                CERROR("decrypted krb5 header mismatch\n");
-                goto out_free;
-        }
+       if (rc != 0) {
+               CERROR("error decrypt\n");
+               goto out_free;
+       }
+       LASSERT(plain_out.len == bodysize);
 
-        /* verify checksum, compose clear text as layout:
-         * ------------------------------------------------------
-         * | confounder | gss header | clear msgs | krb5 header |
-         * ------------------------------------------------------
-         */
-        hash_objs[0].len = ke->ke_conf_size;
-        hash_objs[0].data = plain_out.data;
-        hash_objs[1].len = gsshdr->len;
-        hash_objs[1].data = gsshdr->data;
-        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
-        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 3, hash_objs, 0, NULL, &cksum))
-                goto out_free;
-
-        LASSERT(cksum.len >= ke->ke_hash_size);
-        if (memcmp((char *)(khdr + 1) + bodysize,
-                   cksum.data + cksum.len - ke->ke_hash_size,
-                   ke->ke_hash_size)) {
-                CERROR("checksum mismatch\n");
-                goto out_free;
-        }
+       /* expected clear text layout:
+        * -----------------------------------------
+        * | confounder | clear msgs | krb5 header |
+        * -----------------------------------------
+        */
+
+       /* verify krb5 header in token is not modified */
+       if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
+                  sizeof(*khdr))) {
+               CERROR("decrypted krb5 header mismatch\n");
+               goto out_free;
+       }
+
+       /* verify checksum, compose clear text as layout:
+        * ------------------------------------------------------
+        * | confounder | gss header | clear msgs | krb5 header |
+        * ------------------------------------------------------
+        */
+       hash_objs[0].len = ke->ke_conf_size;
+       hash_objs[0].data = plain_out.data;
+       hash_objs[1].len = gsshdr->len;
+       hash_objs[1].data = gsshdr->data;
+       hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
+       hash_objs[2].data = plain_out.data + ke->ke_conf_size;
+       if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                              khdr, 3, hash_objs, 0, NULL, &cksum,
+                              gctx->hash_func))
+               goto out_free;
+
+       LASSERT(cksum.len >= ke->ke_hash_size);
+       if (memcmp((char *)(khdr + 1) + bodysize,
+                  cksum.data + cksum.len - ke->ke_hash_size,
+                  ke->ke_hash_size)) {
+               CERROR("checksum mismatch\n");
+               goto out_free;
+       }
 
-        msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
-        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
+       msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
+       memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
 
-        major = GSS_S_COMPLETE;
+       major = GSS_S_COMPLETE;
 out_free:
-        OBD_FREE_LARGE(tmpbuf, bodysize);
-        rawobj_free(&cksum);
-        return major;
+       OBD_FREE_LARGE(tmpbuf, bodysize);
+       rawobj_free(&cksum);
+       return major;
 }
 
 static
 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
-                               struct ptlrpc_bulk_desc *desc,
-                               rawobj_t *token, int adj_nob)
+                              struct ptlrpc_bulk_desc *desc,
+                              rawobj_t *token, int adj_nob)
 {
-        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
-        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
-        struct krb5_header  *khdr;
-        int                  blocksize;
-        rawobj_t             cksum = RAWOBJ_EMPTY;
-        rawobj_t             cipher, plain;
-        rawobj_t             data_desc[1];
-        int                  rc;
-        __u32                major;
+       struct krb5_ctx     *kctx = gctx->internal_ctx_id;
+       struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+       struct krb5_header  *khdr;
+       int                  blocksize;
+       rawobj_t             cksum = RAWOBJ_EMPTY;
+       rawobj_t             cipher, plain;
+       rawobj_t             data_desc[1];
+       int                  rc;
+       __u32                major;
 
        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-        LASSERT(ke);
+       LASSERT(ke);
 
-        if (token->len < sizeof(*khdr)) {
-                CERROR("short signature: %u\n", token->len);
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
+       if (token->len < sizeof(*khdr)) {
+               CERROR("short signature: %u\n", token->len);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
 
        khdr = (struct krb5_header *)token->data;
 
-        major = verify_krb5_header(kctx, khdr, 1);
-        if (major != GSS_S_COMPLETE) {
-                CERROR("bad krb5 header\n");
-                return major;
-        }
+       major = verify_krb5_header(kctx, khdr, 1);
+       if (major != GSS_S_COMPLETE) {
+               CERROR("bad krb5 header\n");
+               return major;
+       }
 
-        /* block size */
-        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-                blocksize = 1;
-                LBUG();
-        } else {
-                LASSERT(kctx->kc_keye.kb_tfm);
+       /* block size */
+       if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+               LASSERT(kctx->kc_keye.kb_tfm == NULL);
+               blocksize = 1;
+               LBUG();
+       } else {
+               LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
-        }
-        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
-
-        /*
-         * token format is expected as:
-         * -----------------------------------------------
-         * | krb5 header | head/tail cipher text | cksum |
-         * -----------------------------------------------
-         */
-        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
-                         ke->ke_hash_size) {
-                CERROR("short token size: %u\n", token->len);
-                return GSS_S_DEFECTIVE_TOKEN;
-        }
+       }
+       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
 
-        cipher.data = (__u8 *) (khdr + 1);
-        cipher.len = blocksize + sizeof(*khdr);
-        plain.data = cipher.data;
-        plain.len = cipher.len;
+       /*
+        * token format is expected as:
+        * -----------------------------------------------
+        * | krb5 header | head/tail cipher text | cksum |
+        * -----------------------------------------------
+        */
+       if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+           ke->ke_hash_size) {
+               CERROR("short token size: %u\n", token->len);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
 
-        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
-                               desc, &cipher, &plain, adj_nob);
-        if (rc)
-                return GSS_S_DEFECTIVE_TOKEN;
+       cipher.data = (__u8 *) (khdr + 1);
+       cipher.len = blocksize + sizeof(*khdr);
+       plain.data = cipher.data;
+       plain.len = cipher.len;
 
-        /*
-         * verify checksum, compose clear text as layout:
-         * ------------------------------------------
-         * | confounder | clear pages | krb5 header |
-         * ------------------------------------------
-         */
-        data_desc[0].data = plain.data;
-        data_desc[0].len = blocksize;
+       rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+                              desc, &cipher, &plain, adj_nob);
+       if (rc)
+               return GSS_S_DEFECTIVE_TOKEN;
+
+       /*
+        * verify checksum, compose clear text as layout:
+        * ------------------------------------------
+        * | confounder | clear pages | krb5 header |
+        * ------------------------------------------
+        */
+       data_desc[0].data = plain.data;
+       data_desc[0].len = blocksize;
 
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count,
                               GET_KIOV(desc),
-                              &cksum))
+                              &cksum, gctx->hash_func))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);
 
-        if (memcmp(plain.data + blocksize + sizeof(*khdr),
-                   cksum.data + cksum.len - ke->ke_hash_size,
-                   ke->ke_hash_size)) {
-                CERROR("checksum mismatch\n");
-                rawobj_free(&cksum);
-                return GSS_S_BAD_SIG;
-        }
+       if (memcmp(plain.data + blocksize + sizeof(*khdr),
+                  cksum.data + cksum.len - ke->ke_hash_size,
+                  ke->ke_hash_size)) {
+               CERROR("checksum mismatch\n");
+               rawobj_free(&cksum);
+               return GSS_S_BAD_SIG;
+       }
 
-        rawobj_free(&cksum);
-        return GSS_S_COMPLETE;
+       rawobj_free(&cksum);
+       return GSS_S_COMPLETE;
 }
 
 int gss_display_kerberos(struct gss_ctx        *ctx,
index a5eaae2..3ee125f 100644
@@ -59,6 +59,7 @@
 #include "gss_err.h"
 #include "gss_internal.h"
 #include "gss_api.h"
+#include "gss_crypto.h"
 
 static struct list_head registered_mechs = LIST_HEAD_INIT(registered_mechs);
 static DEFINE_SPINLOCK(registered_mechs_lock);
@@ -147,43 +148,45 @@ __u32 lgss_import_sec_context(rawobj_t *input_token,
                               struct gss_api_mech *mech,
                               struct gss_ctx **ctx_id)
 {
-        OBD_ALLOC_PTR(*ctx_id);
-        if (*ctx_id == NULL)
-                return GSS_S_FAILURE;
+       OBD_ALLOC_PTR(*ctx_id);
+       if (*ctx_id == NULL)
+               return GSS_S_FAILURE;
 
-        (*ctx_id)->mech_type = lgss_mech_get(mech);
+       (*ctx_id)->mech_type = lgss_mech_get(mech);
+       (*ctx_id)->hash_func = gss_digest_hash;
 
-        LASSERT(mech);
-        LASSERT(mech->gm_ops);
-        LASSERT(mech->gm_ops->gss_import_sec_context);
-        return mech->gm_ops->gss_import_sec_context(input_token, *ctx_id);
+       LASSERT(mech);
+       LASSERT(mech->gm_ops);
+       LASSERT(mech->gm_ops->gss_import_sec_context);
+       return mech->gm_ops->gss_import_sec_context(input_token, *ctx_id);
 }
 
 __u32 lgss_copy_reverse_context(struct gss_ctx *ctx_id,
-                                struct gss_ctx **ctx_id_new)
+                               struct gss_ctx **ctx_id_new)
 {
-        struct gss_api_mech *mech = ctx_id->mech_type;
-        __u32                major;
+       struct gss_api_mech *mech = ctx_id->mech_type;
+       __u32                major;
 
-        LASSERT(mech);
+       LASSERT(mech);
 
-        OBD_ALLOC_PTR(*ctx_id_new);
-        if (*ctx_id_new == NULL)
-                return GSS_S_FAILURE;
+       OBD_ALLOC_PTR(*ctx_id_new);
+       if (*ctx_id_new == NULL)
+               return GSS_S_FAILURE;
 
-        (*ctx_id_new)->mech_type = lgss_mech_get(mech);
+       (*ctx_id_new)->mech_type = lgss_mech_get(mech);
+       (*ctx_id_new)->hash_func = ctx_id->hash_func;
 
-        LASSERT(mech);
-        LASSERT(mech->gm_ops);
-        LASSERT(mech->gm_ops->gss_copy_reverse_context);
+       LASSERT(mech);
+       LASSERT(mech->gm_ops);
+       LASSERT(mech->gm_ops->gss_copy_reverse_context);
 
-        major = mech->gm_ops->gss_copy_reverse_context(ctx_id, *ctx_id_new);
-        if (major != GSS_S_COMPLETE) {
-                lgss_mech_put(mech);
-                OBD_FREE_PTR(*ctx_id_new);
-                *ctx_id_new = NULL;
-        }
-        return major;
+       major = mech->gm_ops->gss_copy_reverse_context(ctx_id, *ctx_id_new);
+       if (major != GSS_S_COMPLETE) {
+               lgss_mech_put(mech);
+               OBD_FREE_PTR(*ctx_id_new);
+               *ctx_id_new = NULL;
+       }
+       return major;
 }
 
 /*
index b830d71..f6b62f6 100644
@@ -316,7 +316,7 @@ __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
 static
 u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
                 rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
-                rawobj_t *token)
+                rawobj_t *token, digest_hash hash_func)
 {
        struct ahash_request *req;
        int rc2, rc;
@@ -327,9 +327,15 @@ u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
                goto out_init_failed;
        }
 
-       rc2 = gss_digest_hash(req, NULL, msg_count, msgs, iov_count, iovs,
-                             token);
-       rc = cfs_crypto_hash_final(req, key->data, &key->len);
+
+       if (hash_func)
+               rc2 = hash_func(req, NULL, msg_count, msgs, iov_count,
+                               iovs);
+       else
+               rc2 = gss_digest_hash(req, NULL, msg_count, msgs, iov_count,
+                                     iovs);
+
+       rc = cfs_crypto_hash_final(req, token->data, &token->len);
        if (!rc && rc2)
                rc = rc2;
 out_init_failed:
@@ -348,14 +354,14 @@ __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
 
        return sk_make_hmac(skc->sc_hmac,
                            &skc->sc_hmac_key, message_count, messages,
-                           iov_count, iovs, token);
+                           iov_count, iovs, token, gss_context->hash_func);
 }
 
 static
 u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
                   int message_count, rawobj_t *messages,
                   int iov_count, lnet_kiov_t *iovs,
-                  rawobj_t *token)
+                  rawobj_t *token, digest_hash hash_func)
 {
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc = GSS_S_FAILURE;
@@ -372,7 +378,8 @@ u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
                return rc;
 
        if (sk_make_hmac(algo, key, message_count,
-                        messages, iov_count, iovs, &checksum)) {
+                        messages, iov_count, iovs, &checksum,
+                        hash_func)) {
                CDEBUG(D_SEC, "Failed to create checksum to validate\n");
                goto cleanup;
        }
@@ -483,7 +490,8 @@ __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
        struct sk_ctx *skc = gss_context->internal_ctx_id;
 
        return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
-                             message_count, messages, iov_count, iovs, token);
+                             message_count, messages, iov_count, iovs, token,
+                             gss_context->hash_func);
 }
 
 static
@@ -529,7 +537,8 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
        if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
-                        3, msgbufs, 0, NULL, &skw.skw_hmac))
+                        3, msgbufs, 0, NULL, &skw.skw_hmac,
+                        gss_context->hash_func))
                return GSS_S_FAILURE;
 
        token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
@@ -576,7 +585,7 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
        msgbufs[1] = *gss_header;
        msgbufs[2] = skw.skw_cipher;
        rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
-                           0, NULL, &skw.skw_hmac);
+                           0, NULL, &skw.skw_hmac, gss_context->hash_func);
        if (rc)
                return rc;
 
@@ -810,7 +819,8 @@ __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
        skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
        skw.skw_hmac.len = sht_bytes;
        if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
-                        desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
+                        desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac,
+                        gss_context->hash_func))
                return GSS_S_FAILURE;
 
        return GSS_S_COMPLETE;
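The wrap/unwrap hunks above thread the same callback through SSK's HMAC generation and also fix where the HMAC lives in the outgoing token. A comment-only sketch of the layout implied by the pointer and length arithmetic above (field names are the skw members from the patch):

/*
 * SSK wrap token as assembled in gss_wrap_sk() above:
 *
 *   +------------+------------+----------+
 *   | skw_header | skw_cipher | skw_hmac |
 *   +------------+------------+----------+
 *
 * skw_hmac.data points just past the ciphertext
 * (skw_cipher.data + skw_cipher.len), it is filled by sk_make_hmac()
 * using gss_context->hash_func, and the final token->len is the sum of
 * the three field lengths.
 */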
index 6392e76..96ffd1b 100644 (file)
 #include "gss_err.h"
 #include "gss_internal.h"
 #include "gss_api.h"
+#include "gss_crypto.h"
 
 #define GSS_SVC_UPCALL_TIMEOUT  (20)
 
 static DEFINE_SPINLOCK(__ctx_index_lock);
 static __u64 __ctx_index;
 
+unsigned int krb5_allow_old_client_csum;
+
 __u64 gss_get_next_ctx_index(void)
 {
        __u64 idx;
@@ -977,6 +980,20 @@ cache_check:
                 grctx->src_ctx = &rsci->ctx;
         }
 
+       if (gw->gw_flags & LUSTRE_GSS_PACK_KCSUM) {
+               grctx->src_ctx->gsc_mechctx->hash_func = gss_digest_hash;
+       } else if (!strcmp(grctx->src_ctx->gsc_mechctx->mech_type->gm_name,
+                          "krb5") &&
+                  !krb5_allow_old_client_csum) {
+               CWARN("%s: deny connection from '%s' due to missing 'krb_csum' feature, set 'sptlrpc.gss.krb5_allow_old_client_csum=1' to allow, but recommend client upgrade: rc = %d\n",
+                     target->obd_name, libcfs_nid2str(req->rq_peer.nid),
+                     -EPROTO);
+               GOTO(out, rc = SECSVC_DROP);
+       } else {
+               grctx->src_ctx->gsc_mechctx->hash_func =
+                       gss_digest_hash_compat;
+       }
+
         if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
                 CERROR("failed duplicate reverse handle\n");
                 GOTO(out, rc);
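In effect, the server now picks the checksum flavour per incoming context: clients that set LUSTRE_GSS_PACK_KCSUM in gw_flags get gss_digest_hash(); Kerberos contexts from clients without that flag are refused with SECSVC_DROP unless krb5_allow_old_client_csum is enabled; any remaining case (non-krb5 mechanisms, or krb5 once the tunable is set) falls back to gss_digest_hash_compat(), which presumably reproduces the checksum older clients still compute.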
index 15e48b3..8bfdc49 100644 (file)
@@ -52,6 +52,7 @@
 
 static struct dentry *gss_debugfs_dir_lk;
 static struct dentry *gss_debugfs_dir;
+static struct proc_dir_entry *gss_lprocfs_dir;
 
 /*
  * statistic of "out-of-sequence-window"
@@ -132,6 +133,28 @@ static const struct file_operations gss_proc_secinit = {
        .write = gss_proc_write_secinit,
 };
 
+int sptlrpc_krb5_allow_old_client_csum_seq_show(struct seq_file *m, void *data)
+{
+       seq_printf(m, "%u\n", krb5_allow_old_client_csum);
+       return 0;
+}
+
+ssize_t sptlrpc_krb5_allow_old_client_csum_seq_write(struct file *file,
+                                                    const char __user *buffer,
+                                                    size_t count, loff_t *off)
+{
+       bool val;
+       int rc;
+
+       rc = kstrtobool_from_user(buffer, count, &val);
+       if (rc)
+               return rc;
+
+       krb5_allow_old_client_csum = val;
+       return count;
+}
+LPROC_SEQ_FOPS(sptlrpc_krb5_allow_old_client_csum);
+
 static struct lprocfs_vars gss_debugfs_vars[] = {
        { .name =       "replays",
          .fops =       &gss_proc_oos_fops      },
@@ -141,6 +164,12 @@ static struct lprocfs_vars gss_debugfs_vars[] = {
        { NULL }
 };
 
+static struct lprocfs_vars gss_lprocfs_vars[] = {
+       { .name =       "krb5_allow_old_client_csum",
+         .fops =       &sptlrpc_krb5_allow_old_client_csum_fops },
+       { NULL }
+};
+
 /*
  * for userspace helper lgss_keyring.
  *
@@ -187,6 +216,9 @@ void gss_exit_tunables(void)
 
        if (!IS_ERR_OR_NULL(gss_debugfs_dir))
                ldebugfs_remove(&gss_debugfs_dir);
+
+       if (!IS_ERR_OR_NULL(gss_lprocfs_dir))
+               lprocfs_remove(&gss_lprocfs_dir);
 }
 
 int gss_init_tunables(void)
@@ -212,6 +244,14 @@ int gss_init_tunables(void)
                GOTO(out, rc);
        }
 
+       gss_lprocfs_dir = lprocfs_register("gss", sptlrpc_lprocfs_dir,
+                                          gss_lprocfs_vars, NULL);
+       if (IS_ERR_OR_NULL(gss_lprocfs_dir)) {
+               rc = gss_lprocfs_dir ? PTR_ERR(gss_lprocfs_dir) : -ENOMEM;
+               gss_lprocfs_dir = NULL;
+               GOTO(out, rc);
+       }
+
        return 0;
 
 out:
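Once both proc directories exist (the gss one registered here under the sptlrpc parent added in sec_lproc.c below), the new knob should surface as sptlrpc.gss.krb5_allow_old_client_csum, i.e. /proc/fs/lustre/sptlrpc/gss/krb5_allow_old_client_csum on a server, and can be toggled with 'lctl set_param sptlrpc.gss.krb5_allow_old_client_csum=1' as the CWARN message above suggests; kstrtobool_from_user() accepts the usual 0/1/y/n spellings.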
index 2ba29c4..c320081 100644 (file)
@@ -258,6 +258,9 @@ static struct lprocfs_vars sptlrpc_lprocfs_vars[] = {
 struct dentry *sptlrpc_debugfs_dir;
 EXPORT_SYMBOL(sptlrpc_debugfs_dir);
 
+struct proc_dir_entry *sptlrpc_lprocfs_dir;
+EXPORT_SYMBOL(sptlrpc_lprocfs_dir);
+
 int sptlrpc_lproc_init(void)
 {
        int rc;
@@ -272,6 +275,15 @@ int sptlrpc_lproc_init(void)
                sptlrpc_debugfs_dir = NULL;
                return rc;
        }
+
+       sptlrpc_lprocfs_dir = lprocfs_register("sptlrpc", proc_lustre_root,
+                                              NULL, NULL);
+       if (IS_ERR_OR_NULL(sptlrpc_lprocfs_dir)) {
+               rc = sptlrpc_lprocfs_dir ? PTR_ERR(sptlrpc_lprocfs_dir)
+                       : -ENOMEM;
+               sptlrpc_lprocfs_dir = NULL;
+       }
        return 0;
 }
 
@@ -279,4 +291,7 @@ void sptlrpc_lproc_fini(void)
 {
        if (!IS_ERR_OR_NULL(sptlrpc_debugfs_dir))
                ldebugfs_remove(&sptlrpc_debugfs_dir);
+
+       if (!IS_ERR_OR_NULL(sptlrpc_lprocfs_dir))
+               lprocfs_remove(&sptlrpc_lprocfs_dir);
 }