LU-14475 log: Rewrite some log messages
[fs/lustre-release.git] / lustre/ptlrpc/gss/gss_krb5_mech.c
index 6ce908a..a544e8b 100644
@@ -670,31 +670,27 @@ out:
  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
  */
 static
-int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
-                      struct krb5_header *khdr,
-                      char *confounder,
-                      struct ptlrpc_bulk_desc *desc,
-                      rawobj_t *cipher,
-                      int adj_nob)
+int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
+                     struct krb5_header *khdr,
+                     char *confounder,
+                     struct ptlrpc_bulk_desc *desc,
+                     rawobj_t *cipher,
+                     int adj_nob)
 {
-        struct blkcipher_desc   ciph_desc;
-        __u8                    local_iv[16] = {0};
-        struct scatterlist      src, dst;
-       struct sg_table         sg_src, sg_dst;
-        int                     blocksize, i, rc, nob = 0;
+       __u8 local_iv[16] = {0};
+       struct scatterlist src, dst;
+       struct sg_table sg_src, sg_dst;
+       int blocksize, i, rc, nob = 0;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-        LASSERT(desc->bd_iov_count);
+       LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_vec);
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
-        LASSERT(blocksize > 1);
-        LASSERT(cipher->len == blocksize + sizeof(*khdr));
-
-        ciph_desc.tfm  = tfm;
-        ciph_desc.info = local_iv;
-        ciph_desc.flags = 0;
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
+       LASSERT(blocksize > 1);
+       LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
-        /* encrypt confounder */
+       /* encrypt confounder */
        rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
        if (rc != 0)
                return rc;
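
Context for the hunks that follow: the kernel removed the old blkcipher interface, so the patch moves to crypto_sync_skcipher with an on-stack request. A minimal sketch of the pattern, assuming a CBC-mode transform and a linear buffer; tfm, buf and len are hypothetical and nothing here is taken verbatim from the patch:

	/* Sketch only: one-shot, in-place encryption of a linear buffer.
	 * 'tfm' is assumed to come from crypto_alloc_sync_skcipher("cbc(aes)", 0, 0)
	 * with the key already set; 'len' must be a multiple of the block size.
	 * Needs <crypto/skcipher.h> and <linux/scatterlist.h>. */
	static int encrypt_buf_sketch(struct crypto_sync_skcipher *tfm,
				      void *buf, unsigned int len)
	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
		struct scatterlist sg;
		__u8 iv[16] = { 0 };
		int rc;

		sg_init_one(&sg, buf, len);
		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		rc = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe request state off the stack */
		return rc;
	}
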
@@ -704,20 +700,24 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                gss_teardown_sgtable(&sg_src);
                return rc;
        }
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  blocksize, local_iv);
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
-                                        sg_src.sgl, blocksize);
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
-        if (rc) {
-                CERROR("error to encrypt confounder: %d\n", rc);
-                return rc;
-        }
+       if (rc) {
+               CERROR("error to encrypt confounder: %d\n", rc);
+               skcipher_request_zero(req);
+               return rc;
+       }
 
-        /* encrypt clear pages */
-        for (i = 0; i < desc->bd_iov_count; i++) {
+       /* encrypt clear pages */
+       for (i = 0; i < desc->bd_iov_count; i++) {
                sg_init_table(&src, 1);
                sg_set_page(&src, desc->bd_vec[i].bv_page,
                            (desc->bd_vec[i].bv_len +
@@ -733,28 +733,36 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                desc->bd_enc_vec[i].bv_offset = dst.offset;
                desc->bd_enc_vec[i].bv_len = dst.length;
 
-               rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
-                                                    src.length);
-                if (rc) {
-                        CERROR("error to encrypt page: %d\n", rc);
-                        return rc;
-                }
-        }
+               skcipher_request_set_crypt(req, &src, &dst,
+                                          src.length, local_iv);
+               rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
+               if (rc) {
+                       CERROR("error to encrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
+                       return rc;
+               }
+       }
 
-        /* encrypt krb5 header */
+       /* encrypt krb5 header */
        rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
-       if (rc != 0)
+       if (rc != 0) {
+               skcipher_request_zero(req);
                return rc;
+       }
 
        rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
                           sizeof(*khdr));
        if (rc != 0) {
                gss_teardown_sgtable(&sg_src);
+               skcipher_request_zero(req);
                return rc;
        }
 
-       rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
-                                        sizeof(*khdr));
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  sizeof(*khdr), local_iv);
+       rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+                                       sizeof(*khdr));
+       skcipher_request_zero(req);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
@@ -789,38 +797,34 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
  *   should have been done by prep_bulk().
  */
 static
-int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
-                      struct krb5_header *khdr,
-                      struct ptlrpc_bulk_desc *desc,
-                      rawobj_t *cipher,
-                      rawobj_t *plain,
-                      int adj_nob)
+int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
+                     struct krb5_header *khdr,
+                     struct ptlrpc_bulk_desc *desc,
+                     rawobj_t *cipher,
+                     rawobj_t *plain,
+                     int adj_nob)
 {
-        struct blkcipher_desc   ciph_desc;
-        __u8                    local_iv[16] = {0};
-        struct scatterlist      src, dst;
-       struct sg_table         sg_src, sg_dst;
-        int                     ct_nob = 0, pt_nob = 0;
-        int                     blocksize, i, rc;
-
-        LASSERT(desc->bd_iov_count);
-       LASSERT(desc->bd_enc_vec);
-        LASSERT(desc->bd_nob_transferred);
+       __u8 local_iv[16] = {0};
+       struct scatterlist src, dst;
+       struct sg_table sg_src, sg_dst;
+       int ct_nob = 0, pt_nob = 0;
+       int blocksize, i, rc;
+       SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-       blocksize = crypto_blkcipher_blocksize(tfm);
-        LASSERT(blocksize > 1);
-        LASSERT(cipher->len == blocksize + sizeof(*khdr));
+       LASSERT(desc->bd_iov_count);
+       LASSERT(desc->bd_enc_vec);
+       LASSERT(desc->bd_nob_transferred);
 
-        ciph_desc.tfm  = tfm;
-        ciph_desc.info = local_iv;
-        ciph_desc.flags = 0;
+       blocksize = crypto_sync_skcipher_blocksize(tfm);
+       LASSERT(blocksize > 1);
+       LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
-        if (desc->bd_nob_transferred % blocksize) {
-                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
-                return -EPROTO;
-        }
+       if (desc->bd_nob_transferred % blocksize) {
+               CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+               return -EPROTO;
+       }
 
-        /* decrypt head (confounder) */
+       /* decrypt head (confounder) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
        if (rc != 0)
                return rc;
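
gss_setup_sgtable() and gss_teardown_sgtable() wrap each linear buffer in a single-entry scatterlist table before it is handed to the cipher. For a kmalloc'd buffer the setup side reduces to roughly the sketch below; the real Lustre helper may do more, for example rejecting buffers it cannot map:

	/* Sketch: wrap a linear (kmalloc'd) buffer in a one-entry sg_table. */
	static int setup_sgtable_sketch(struct sg_table *sgt,
					struct scatterlist *sg,
					const void *buf, unsigned int len)
	{
		sg_init_table(sg, 1);
		sg_set_buf(sg, buf, len);
		sgt->sgl = sg;
		sgt->nents = sgt->orig_nents = 1;
		return 0;
	}
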
@@ -831,27 +835,31 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                return rc;
        }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
-                                        sg_src.sgl, blocksize);
+       skcipher_request_set_sync_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  blocksize, local_iv);
+
+       rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
 
        gss_teardown_sgtable(&sg_dst);
        gss_teardown_sgtable(&sg_src);
 
-        if (rc) {
-                CERROR("error to decrypt confounder: %d\n", rc);
-                return rc;
-        }
+       if (rc) {
+               CERROR("error to decrypt confounder: %d\n", rc);
+               skcipher_request_zero(req);
+               return rc;
+       }
 
        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
-               if (desc->bd_enc_vec[i].bv_offset % blocksize
-                   != 0 ||
-                   desc->bd_enc_vec[i].bv_len % blocksize
-                   != 0) {
+               if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
+                   desc->bd_enc_vec[i].bv_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_vec[i].bv_offset,
                               desc->bd_enc_vec[i].bv_len,
                               blocksize);
+                       skcipher_request_zero(req);
                        return -EFAULT;
                }
 
@@ -888,12 +896,14 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                        sg_assign_page(&dst,
                                       desc->bd_vec[i].bv_page);
 
-               rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-                                                src.length);
-                if (rc) {
-                        CERROR("error to decrypt page: %d\n", rc);
-                        return rc;
-                }
+               skcipher_request_set_crypt(req, &src, &dst,
+                                          src.length, local_iv);
+               rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
+               if (rc) {
+                       CERROR("error to decrypt page: %d\n", rc);
+                       skcipher_request_zero(req);
+                       return rc;
+               }
 
                if (desc->bd_vec[i].bv_len % blocksize != 0) {
                        memcpy(page_address(desc->bd_vec[i].bv_page) +
@@ -908,24 +918,26 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                pt_nob += desc->bd_vec[i].bv_len;
        }
 
-        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
-                CERROR("%d cipher text transferred but only %d decrypted\n",
-                       desc->bd_nob_transferred, ct_nob);
-                return -EFAULT;
-        }
+       if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+               CERROR("%d cipher text transferred but only %d decrypted\n",
+                      desc->bd_nob_transferred, ct_nob);
+               skcipher_request_zero(req);
+               return -EFAULT;
+       }
 
-        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
-                CERROR("%d plain text expected but only %d received\n",
-                       desc->bd_nob, pt_nob);
-                return -EFAULT;
-        }
+       if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+               CERROR("%d plain text expected but only %d received\n",
+                      desc->bd_nob, pt_nob);
+               skcipher_request_zero(req);
+               return -EFAULT;
+       }
 
        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_vec[i++].bv_len = 0;
 
-        /* decrypt tail (krb5 header) */
+       /* decrypt tail (krb5 header) */
        rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
                               sizeof(*khdr));
        if (rc != 0)
@@ -938,23 +950,25 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                return rc;
        }
 
-       rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
-                                        sizeof(*khdr));
-
+       skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+                                  sizeof(*khdr), local_iv);
+       rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+                                       sizeof(*khdr));
        gss_teardown_sgtable(&sg_src);
        gss_teardown_sgtable(&sg_dst);
 
-        if (rc) {
-                CERROR("error to decrypt tail: %d\n", rc);
-                return rc;
-        }
+       skcipher_request_zero(req);
+       if (rc) {
+               CERROR("error to decrypt tail: %d\n", rc);
+               return rc;
+       }
 
-        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
-                CERROR("krb5 header doesn't match\n");
-                return -EACCES;
-        }
+       if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+               CERROR("krb5 header doesn't match\n");
+               return -EACCES;
+       }
 
-        return 0;
+       return 0;
 }
 
 static
@@ -979,7 +993,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
-               crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+               crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));
 
        /*
         * final token format:
@@ -1003,7 +1017,8 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksize = crypto_sync_skcipher_blocksize(
+                                                       kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -1051,7 +1066,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t arc4_keye = RAWOBJ_EMPTY;
-               struct crypto_blkcipher *arc4_tfm;
+               struct crypto_sync_skcipher *arc4_tfm;
 
                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye,
@@ -1060,14 +1075,14 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
-                                           arc4_keye.len)) {
+               if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
@@ -1076,7 +1091,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
                                       &cipher, 1);
 arc4_out_tfm:
-               crypto_free_blkcipher(arc4_tfm);
+               crypto_free_sync_skcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
        } else {
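
For arcfour-hmac the transform is allocated per call rather than cached in the security context, so the alloc/setkey/free lifecycle shown in the hunks above is the whole story. Condensed into one hedged sketch; key, keylen and the elided payload step are placeholders:

	/* Sketch of the per-call tfm lifecycle used for arcfour-hmac. */
	static int arc4_crypt_sketch(const void *key, unsigned int keylen)
	{
		struct crypto_sync_skcipher *tfm;
		int rc = 0;

		tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		if (crypto_sync_skcipher_setkey(tfm, key, keylen)) {
			rc = -EACCES;
			goto out_free;
		}

		/* ... encrypt/decrypt with the tfm here ... */

	out_free:
		crypto_free_sync_skcipher(tfm);
		return rc;
	}
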
@@ -1112,7 +1127,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
        LASSERT(desc->bd_enc_vec);
        LASSERT(kctx->kc_keye.kb_tfm);
 
-       blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+       blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_vec[i].bv_page);
@@ -1144,7 +1159,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
-       int                  blocksize;
+       int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8                 conf[GSS_MAX_CIPHER_BLOCK];
@@ -1173,10 +1188,10 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
 
        /*
@@ -1184,9 +1199,9 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         */
-       LASSERT(blocksize <= ke->ke_conf_size);
-       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
-       LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+       LASSERT(blocksz <= ke->ke_conf_size);
+       LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
+       LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);
 
        /*
         * clear text layout for checksum:
@@ -1221,7 +1236,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
        data_desc[0].len = ke->ke_conf_size;
 
        cipher.data = (__u8 *)(khdr + 1);
-       cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksz + sizeof(*khdr);
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LBUG();
@@ -1257,7 +1272,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        unsigned char       *tmpbuf;
-       int                  blocksize, bodysize;
+       int                  blocksz, bodysize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher_in, plain_out;
        rawobj_t             hash_objs[3];
@@ -1283,10 +1298,10 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
 
        /* expected token layout:
@@ -1296,7 +1311,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         */
        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
 
-       if (bodysize % blocksize) {
+       if (bodysize % blocksz) {
                CERROR("odd bodysize %d\n", bodysize);
                return GSS_S_DEFECTIVE_TOKEN;
        }
@@ -1326,7 +1341,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                rawobj_t                 arc4_keye;
-               struct crypto_blkcipher *arc4_tfm;
+               struct crypto_sync_skcipher *arc4_tfm;
 
                cksum.data = token->data + token->len - ke->ke_hash_size;
                cksum.len = ke->ke_hash_size;
@@ -1338,14 +1353,14 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                        GOTO(arc4_out, rc = -EACCES);
                }
 
-               arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+               arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }
 
-               if (crypto_blkcipher_setkey(arc4_tfm,
-                                           arc4_keye.data, arc4_keye.len)) {
+               if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
@@ -1354,7 +1369,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
                                       &plain_out, 0);
 arc4_out_tfm:
-               crypto_free_blkcipher(arc4_tfm);
+               crypto_free_sync_skcipher(arc4_tfm);
 arc4_out_key:
                rawobj_free(&arc4_keye);
 arc4_out:
@@ -1425,7 +1440,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
-       int                  blocksize;
+       int                  blocksz;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
@@ -1450,13 +1465,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
        /* block size */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
-               blocksize = 1;
+               blocksz = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
-               blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+               blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
-       LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+       LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
 
        /*
         * token format is expected as:
@@ -1464,14 +1479,14 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
-       if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+       if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
            ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }
 
        cipher.data = (__u8 *) (khdr + 1);
-       cipher.len = blocksize + sizeof(*khdr);
+       cipher.len = blocksz + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;
 
@@ -1487,7 +1502,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
-       data_desc[0].len = blocksize;
+       data_desc[0].len = blocksz;
 
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
@@ -1497,7 +1512,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);
 
-       if (memcmp(plain.data + blocksize + sizeof(*khdr),
+       if (memcmp(plain.data + blocksz + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
@@ -1510,15 +1525,15 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
 }
 
 int gss_display_kerberos(struct gss_ctx        *ctx,
-                         char                  *buf,
-                         int                    bufsize)
+                        char                  *buf,
+                        int                    bufsize)
 {
-        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
-        int                 written;
+       struct krb5_ctx    *kctx = ctx->internal_ctx_id;
+       int                 written;
 
-        written = snprintf(buf, bufsize, "krb5 (%s)",
-                           enctype2str(kctx->kc_enctype));
-        return written;
+       written = scnprintf(buf, bufsize, "krb5 (%s)",
+                           enctype2str(kctx->kc_enctype));
+       return written;
 }
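
The snprintf() to scnprintf() switch is more than style: on truncation snprintf() returns the length the output would have had, while scnprintf() returns the number of bytes actually written, which is what a caller treating the result as "buffer space consumed" needs. A small sketch of the difference:

	char buf[8];
	int a, b;

	/* "hello, world" is 12 characters but only 7 fit (plus the NUL). */
	a = snprintf(buf, sizeof(buf), "%s", "hello, world");	/* a == 12 */
	b = scnprintf(buf, sizeof(buf), "%s", "hello, world");	/* b == 7  */
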
 
 static struct gss_api_ops gss_kerberos_ops = {