LU-8602 gss: Properly port gss to newer crypto api.
[fs/lustre-release.git] lustre/ptlrpc/gss/gss_sk_mech.c
index 75b80fe..9fab35a 100644
@@ -22,7 +22,7 @@
 /*
  * Copyright (C) 2013, 2015, Trustees of Indiana University
  *
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2014, 2016, Intel Corporation.
  *
  * Author: Jeremy Filizetti <jfilizet@iu.edu>
  * Author: Andrew Korty <ajk@iu.edu>
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/mutex.h>
+#include <crypto/ctr.h>
 
 #include <obd.h>
 #include <obd_class.h>
 #include <obd_support.h>
-#include <lustre/lustre_user.h>
 
 #include "gss_err.h"
 #include "gss_crypto.h"
 #include "gss_asn1.h"
 
 #define SK_INTERFACE_VERSION 1
+#define SK_MSG_VERSION 1
 #define SK_MIN_SIZE 8
+#define SK_IV_SIZE 16
 
-struct sk_ctx {
-       __u32                   sc_version;
-       __u16                   sc_hmac;
-       __u16                   sc_crypt;
-       __u32                   sc_expire;
-       rawobj_t                sc_shared_key;
-       rawobj_t                sc_iv;
-       struct gss_keyblock     sc_session_kb;
-};
+/* Starting number for reverse contexts.  It is critical to security
+ * that reverse contexts use a different range of numbers than regular
+ * contexts because they are using the same key.  Therefore the IV/nonce
+ * combination must be unique for them.  To accomplish this reverse contexts
+ * use the negative range of a 64-bit number and regular contexts use the
+ * positive range.  If the same IV/nonce combination were reused it would leak
+ * information about the plaintext. */
+#define SK_IV_REV_START (1ULL << 63)
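The IV-range split can be seen in a tiny standalone sketch (userspace C, not part of the patch; SK_IV_REV_START reproduced from above):

#include <stdio.h>
#include <inttypes.h>

#define SK_IV_REV_START (1ULL << 63)

int main(void)
{
	uint64_t fwd = 0;               /* regular context counter */
	uint64_t rev = SK_IV_REV_START; /* reverse context counter */

	/* Both sides increment before use, so the starting values are
	 * never emitted and double as wrap-detection sentinels. */
	printf("first regular IV: %" PRIu64 "\n", ++fwd); /* 1 */
	printf("first reverse IV: %" PRIu64 "\n", ++rev); /* 2^63 + 1 */
	return 0;
}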
 
-static struct sk_crypt_type sk_crypt_types[] = {
-       [SK_CRYPT_AES_CTR] = {
-               .sct_name = "ctr(aes)",
-               .sct_bytes = 32,
-       },
+struct sk_ctx {
+       enum cfs_crypto_crypt_alg sc_crypt;
+       enum cfs_crypto_hash_alg  sc_hmac;
+       __u32                     sc_expire;
+       __u32                     sc_host_random;
+       __u32                     sc_peer_random;
+       atomic64_t                sc_iv;
+       rawobj_t                  sc_hmac_key;
+       struct gss_keyblock       sc_session_kb;
 };
 
-static struct sk_hmac_type sk_hmac_types[] = {
-       [SK_HMAC_SHA256] = {
-               .sht_name = "hmac(sha256)",
-               .sht_bytes = 32,
-       },
-       [SK_HMAC_SHA512] = {
-               .sht_name = "hmac(sha512)",
-               .sht_bytes = 64,
-       },
+struct sk_hdr {
+       __u64                   skh_version;
+       __u64                   skh_iv;
+} __attribute__((packed));
+
+/* The format of SK wire data is similar to that of RFC3686 ESP Payload
+ * (section 3) except instead of just an IV there is a struct sk_hdr.
+ * ---------------------------------------------------------------------
+ * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) |
+ * --------------------------------------------------------------------- */
+struct sk_wire {
+       rawobj_t                skw_header;
+       rawobj_t                skw_cipher;
+       rawobj_t                skw_hmac;
 };
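Since the header is fixed-size and the digest size is known from the negotiated HMAC, the three regions can be located by pointer arithmetic alone, as the unwrap paths below do. A minimal standalone sketch (local stand-ins for rawobj_t and struct sk_hdr; none of these names are the kernel API):

#include <stdint.h>

struct hdr { uint64_t version; uint64_t iv; } __attribute__((packed));
struct region { uint8_t *data; uint32_t len; };

/* Split a token into header | ciphertext | HMAC; -1 if too short. */
static int sk_wire_split(uint8_t *token, uint32_t token_len,
			 uint32_t hmac_len, struct region out[3])
{
	if (token_len < sizeof(struct hdr) + hmac_len)
		return -1;

	out[0].data = token;                            /* sk_hdr */
	out[0].len  = sizeof(struct hdr);               /* 16 bytes */
	out[1].data = out[0].data + out[0].len;         /* ciphertext */
	out[1].len  = token_len - out[0].len - hmac_len;
	out[2].data = out[1].data + out[1].len;         /* HMAC */
	out[2].len  = hmac_len;
	return 0;
}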
 
 static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
@@ -82,39 +92,55 @@ static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
        return (len + blocksize - 1) & (~(blocksize - 1));
 }
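A quick standalone check of the rounding behavior (function copied verbatim from above; the mask trick requires a power-of-two block size):

#include <assert.h>

static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	return (len + blocksize - 1) & (~(blocksize - 1));
}

int main(void)
{
	assert(sk_block_mask(1, 16) == 16);   /* round up */
	assert(sk_block_mask(16, 16) == 16);  /* already aligned */
	assert(sk_block_mask(17, 16) == 32);  /* next multiple */
	return 0;
}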
 
-static int sk_init_keys(struct sk_ctx *skc)
+static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh)
 {
-       int rc;
-       unsigned int ivsize;
+       __u64 tmp_iv;
+
+       skh->skh_version = be64_to_cpu(SK_MSG_VERSION);
+
+       /* Always use inc_return so we never hand out the initial values,
+        * which are reserved for detecting counter wrap */
+       tmp_iv = atomic64_inc_return(&skc->sc_iv);
+       skh->skh_iv = be64_to_cpu(tmp_iv);
+       if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) {
+               CERROR("Counter looped, connection must be reset to avoid "
+                      "leaking plaintext information\n");
+               return GSS_S_FAILURE;
+       }
 
-       rc = gss_keyblock_init(&skc->sc_session_kb,
-                              sk_crypt_types[skc->sc_crypt].sct_name, 0);
-       if (rc)
-               return rc;
+       return GSS_S_COMPLETE;
+}
 
-       ivsize = crypto_blkcipher_ivsize(skc->sc_session_kb.kb_tfm);
-       if (skc->sc_iv.len != ivsize) {
-               CERROR("IV size for algorithm (%d) does not match provided IV "
-                      "size: %d\n", ivsize, skc->sc_iv.len);
-               return -EINVAL;
-       }
+static int sk_verify_header(struct sk_hdr *skh)
+{
+       if (cpu_to_be64(skh->skh_version) != SK_MSG_VERSION)
+               return GSS_S_DEFECTIVE_TOKEN;
 
-       crypto_blkcipher_set_iv(skc->sc_session_kb.kb_tfm,
-                                      skc->sc_iv.data, skc->sc_iv.len);
+       return GSS_S_COMPLETE;
+}
 
-       return 0;
+static void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv)
+{
+       __u32 ctr = cpu_to_be32(1);
+
+       memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE);
+       iv += CTR_RFC3686_NONCE_SIZE;
+       memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE);
+       iv += CTR_RFC3686_IV_SIZE;
+       memcpy(iv, &ctr, sizeof(ctr));
 }
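The resulting 16-byte block follows the RFC 3686 counter-block layout: a 4-byte nonce, an 8-byte IV, and a 4-byte block counter starting at 1. A standalone sketch with the sizes written as literals (crypto/ctr.h is kernel-only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 3686 counter block: nonce (4) | IV (8) | counter (4). */
static void build_ctr_block(uint8_t out[16], uint32_t nonce_be,
			    uint64_t iv_be)
{
	const uint8_t ctr_one_be[4] = { 0, 0, 0, 1 }; /* big-endian 1 */

	memcpy(out, &nonce_be, 4);
	memcpy(out + 4, &iv_be, 8);
	memcpy(out + 12, ctr_one_be, 4);
}

int main(void)
{
	uint8_t blk[16];
	int i;

	build_ctr_block(blk, 0, 0);
	for (i = 0; i < 16; i++)
		printf("%02x", blk[i]);
	printf("\n"); /* prints 24 zeros then 00000001 */
	return 0;
}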
 
-static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc)
+static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
 {
        char *ptr = inbuf->data;
        char *end = inbuf->data + inbuf->len;
-       __u32 tmp;
+       char sk_hmac[CRYPTO_MAX_ALG_NAME];
+       char sk_crypt[CRYPTO_MAX_ALG_NAME];
+       u32 tmp;
 
        /* see sk_serialize_kctx() for format from userspace side */
        /*  1. Version */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
-               CERROR("Failed to read shared key interface version");
+               CERROR("Failed to read shared key interface version\n");
                return -1;
        }
        if (tmp != SK_INTERFACE_VERSION) {
@@ -123,65 +149,80 @@ static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc)
        }
 
        /* 2. HMAC type */
-       if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
-               CERROR("Failed to read HMAC algorithm type");
+       if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) {
+               CERROR("Failed to read HMAC algorithm type\n");
                return -1;
        }
-       if (skc->sc_hmac >= SK_HMAC_MAX) {
-               CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
+
+       skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac);
+       if (skc->sc_hmac != CFS_HASH_ALG_NULL &&
+           skc->sc_hmac != CFS_HASH_ALG_SHA256 &&
+           skc->sc_hmac != CFS_HASH_ALG_SHA512) {
+               CERROR("Invalid hmac type: %s\n", sk_hmac);
                return -1;
        }
 
        /* 3. crypt type */
-       if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
-               CERROR("Failed to read crypt algorithm type");
+       if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) {
+               CERROR("Failed to read crypt algorithm type\n");
                return -1;
        }
-       if (skc->sc_crypt >= SK_CRYPT_MAX) {
-               CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
+
+       skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt);
+       if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) {
+               CERROR("Invalid crypt type: %s\n", sk_crypt);
                return -1;
        }
 
        /* 4. expiration time */
        if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
-               CERROR("Failed to read context expiration time");
+               CERROR("Failed to read context expiration time\n");
                return -1;
        }
-       skc->sc_expire = tmp + cfs_time_current_sec();
+       skc->sc_expire = tmp + ktime_get_real_seconds();
 
-       /* 5. Shared key */
-       if (gss_get_rawobj(&ptr, end, &skc->sc_shared_key)) {
-               CERROR("Failed to read shared key");
+       /* 5. host random is used as nonce for encryption */
+       if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
+                         sizeof(skc->sc_host_random))) {
+               CERROR("Failed to read host random\n");
                return -1;
        }
-       if (skc->sc_shared_key.len <= SK_MIN_SIZE) {
-               CERROR("Shared key must key must be larger than %d bytes\n",
-                      SK_MIN_SIZE);
+
+       /* 6. peer random is used as nonce for decryption */
+       if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
+                         sizeof(skc->sc_peer_random))) {
+               CERROR("Failed to read peer random\n");
                return -1;
        }
 
-       /* 6. IV, can be empty if not using privacy mode */
-       if (gss_get_rawobj(&ptr, end, &skc->sc_iv)) {
-               CERROR("Failed to read initialization vector ");
+       /* 7. HMAC key */
+       if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
+               CERROR("Failed to read HMAC key\n");
+               return -1;
+       }
+       if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
+               CERROR("HMAC key must key must be larger than %d bytes\n",
+                      SK_MIN_SIZE);
                return -1;
        }
 
-       /* 7. Session key, can be empty if not using privacy mode */
+       /* 8. Session key, can be empty if not using privacy mode */
        if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
-               CERROR("Failed to read session key");
+               CERROR("Failed to read session key\n");
                return -1;
        }
 
        return 0;
 }
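sk_serialize_kctx() on the userspace side produces the buffer parsed above; its exact helpers are not shown here, but the layout it must emit follows directly from the reads. A hypothetical packing sketch (all names local to this example):

#include <stdint.h>
#include <string.h>

/* Hypothetical illustration of the wire layout read by sk_fill_context():
 * version | hmac name | crypt name | expiry | host random | peer random |
 * hmac key (len-prefixed) | session key (len-prefixed, may be empty). */
#define ALG_NAME_MAX 64 /* stand-in for CRYPTO_MAX_ALG_NAME */

static uint8_t *put_bytes(uint8_t *p, const void *v, size_t n)
{
	memcpy(p, v, n);
	return p + n;
}

static uint8_t *put_rawobj(uint8_t *p, const uint8_t *data, uint32_t len)
{
	p = put_bytes(p, &len, sizeof(len)); /* length prefix */
	return put_bytes(p, data, len);      /* payload */
}

static size_t pack_sk_context(uint8_t *buf, uint32_t version,
			      const char hmac[ALG_NAME_MAX],
			      const char crypt[ALG_NAME_MAX],
			      uint32_t expire, uint32_t host_random,
			      uint32_t peer_random,
			      const uint8_t *hmac_key, uint32_t hmac_key_len,
			      const uint8_t *sess_key, uint32_t sess_key_len)
{
	uint8_t *p = buf;

	p = put_bytes(p, &version, sizeof(version));
	p = put_bytes(p, hmac, ALG_NAME_MAX);  /* fixed-width name */
	p = put_bytes(p, crypt, ALG_NAME_MAX); /* fixed-width name */
	p = put_bytes(p, &expire, sizeof(expire));
	p = put_bytes(p, &host_random, sizeof(host_random));
	p = put_bytes(p, &peer_random, sizeof(peer_random));
	p = put_rawobj(p, hmac_key, hmac_key_len);
	p = put_rawobj(p, sess_key, sess_key_len);
	return p - buf;
}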
 
-static void delete_sk_context(struct sk_ctx *skc)
+static void sk_delete_context(struct sk_ctx *skc)
 {
        if (!skc)
                return;
+
+       rawobj_free(&skc->sc_hmac_key);
        gss_keyblock_free(&skc->sc_session_kb);
-       rawobj_free(&skc->sc_iv);
-       rawobj_free(&skc->sc_shared_key);
+       OBD_FREE_PTR(skc);
 }
 
 static
@@ -197,14 +238,17 @@ __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
        if (!skc)
                return GSS_S_FAILURE;
 
-       if (fill_sk_context(inbuf, skc))
-               goto out_error;
+       atomic64_set(&skc->sc_iv, 0);
+
+       if (sk_fill_context(inbuf, skc))
+               goto out_err;
 
        /* Only privacy mode needs to initialize keys */
        if (skc->sc_session_kb.kb_key.len > 0) {
                privacy = true;
-               if (sk_init_keys(skc))
-                       goto out_error;
+               if (gss_keyblock_init(&skc->sc_session_kb,
+                                     cfs_crypto_crypt_name(skc->sc_crypt), 0))
+                       goto out_err;
        }
 
        gss_context->internal_ctx_id = skc;
@@ -213,9 +257,8 @@ __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context)
 
        return GSS_S_COMPLETE;
 
-out_error:
-       delete_sk_context(skc);
-       OBD_FREE_PTR(skc);
+out_err:
+       sk_delete_context(skc);
        return GSS_S_FAILURE;
 }
 
@@ -230,35 +273,39 @@ __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old,
        if (!skc_new)
                return GSS_S_FAILURE;
 
-       skc_new->sc_crypt = skc_old->sc_crypt;
        skc_new->sc_hmac = skc_old->sc_hmac;
+       skc_new->sc_crypt = skc_old->sc_crypt;
        skc_new->sc_expire = skc_old->sc_expire;
-       if (rawobj_dup(&skc_new->sc_shared_key, &skc_old->sc_shared_key))
-               goto out_error;
-       if (rawobj_dup(&skc_new->sc_iv, &skc_old->sc_iv))
-               goto out_error;
+       skc_new->sc_host_random = skc_old->sc_host_random;
+       skc_new->sc_peer_random = skc_old->sc_peer_random;
+
+       atomic64_set(&skc_new->sc_iv, SK_IV_REV_START);
+
+       if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key))
+               goto out_err;
        if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb))
-               goto out_error;
+               goto out_err;
 
        /* Only privacy mode needs to initialize keys */
        if (skc_new->sc_session_kb.kb_key.len > 0)
-               if (sk_init_keys(skc_new))
-                       goto out_error;
+               if (gss_keyblock_init(&skc_new->sc_session_kb,
+                                     cfs_crypto_crypt_name(skc_new->sc_crypt),
+                                     0))
+                       goto out_err;
 
        gss_context_new->internal_ctx_id = skc_new;
        CDEBUG(D_SEC, "successfully copied reverse sk context\n");
 
        return GSS_S_COMPLETE;
 
-out_error:
-       delete_sk_context(skc_new);
-       OBD_FREE_PTR(skc_new);
+out_err:
+       sk_delete_context(skc_new);
        return GSS_S_FAILURE;
 }
 
 static
 __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
-                            unsigned long *endtime)
+                            time64_t *endtime)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
 
@@ -267,26 +314,26 @@ __u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
 }
 
 static
-__u32 sk_make_checksum(char *alg_name, rawobj_t *key,
-                      int msg_count, rawobj_t *msgs,
-                      int iov_count, lnet_kiov_t *iovs,
-                      rawobj_t *token)
+u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
+                rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
+                rawobj_t *token)
 {
-       struct crypto_hash *tfm;
-       int rc;
+       struct cfs_crypto_hash_desc *desc;
+       int rc2, rc;
 
-       tfm = crypto_alloc_hash(alg_name, 0, 0);
-       if (!tfm)
-               return GSS_S_FAILURE;
-
-       rc = GSS_S_FAILURE;
-       LASSERT(token->len >= crypto_hash_digestsize(tfm));
-       if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
-                           token))
-               rc = GSS_S_COMPLETE;
+       desc = cfs_crypto_hash_init(algo, key->data, key->len);
+       if (IS_ERR(desc)) {
+               rc = PTR_ERR(desc);
+               goto out_init_failed;
+       }
 
-       crypto_free_hash(tfm);
-       return rc;
+       rc2 = gss_digest_hash(desc, NULL, msg_count, msgs, iov_count, iovs,
+                             token);
+       rc = cfs_crypto_hash_final(desc, token->data, &token->len);
+       if (!rc && rc2)
+               rc = rc2;
+out_init_failed:
+       return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
 }
 
 static
@@ -298,24 +345,22 @@ __u32 gss_get_mic_sk(struct gss_ctx *gss_context,
                     rawobj_t *token)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
-       return sk_make_checksum(sk_hmac_types[skc->sc_hmac].sht_name,
-                               &skc->sc_shared_key, message_count, messages,
-                               iov_count, iovs, token);
+
+       return sk_make_hmac(skc->sc_hmac,
+                           &skc->sc_hmac_key, message_count, messages,
+                           iov_count, iovs, token);
 }
 
 static
-__u32 sk_verify_checksum(struct sk_hmac_type *sht,
-                        rawobj_t *key,
-                        int message_count,
-                        rawobj_t *messages,
-                        int iov_count,
-                        lnet_kiov_t *iovs,
-                        rawobj_t *token)
+u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
+                  int message_count, rawobj_t *messages,
+                  int iov_count, lnet_kiov_t *iovs,
+                  rawobj_t *token)
 {
        rawobj_t checksum = RAWOBJ_EMPTY;
        __u32 rc = GSS_S_FAILURE;
 
-       checksum.len = sht->sht_bytes;
+       checksum.len = cfs_crypto_hash_digestsize(algo);
        if (token->len < checksum.len) {
                CDEBUG(D_SEC, "Token received too short, expected %d "
                       "received %d\n", token->len, checksum.len);
@@ -326,8 +371,8 @@ __u32 sk_verify_checksum(struct sk_hmac_type *sht,
        if (!checksum.data)
                return rc;
 
-       if (sk_make_checksum(sht->sht_name, key, message_count,
-                            messages, iov_count, iovs, &checksum)) {
+       if (sk_make_hmac(algo, key, message_count,
+                        messages, iov_count, iovs, &checksum)) {
                CDEBUG(D_SEC, "Failed to create checksum to validate\n");
                goto cleanup;
        }
@@ -345,6 +390,90 @@ cleanup:
        return rc;
 }
 
+/* sk_verify_bulk_hmac() differs slightly from sk_verify_hmac() because all
+ * encrypted pages in the bulk descriptor are populated even though the
+ * sender only transferred bd_nob bytes; the HMAC must cover only those
+ * bytes, otherwise the calculated HMAC will be incorrect. */
+static
+u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
+                       int msgcnt, rawobj_t *msgs, int iovcnt,
+                       lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
+{
+       struct cfs_crypto_hash_desc *desc;
+       rawobj_t checksum = RAWOBJ_EMPTY;
+       struct ahash_request *req;
+       struct scatterlist sg[1];
+       int rc = GSS_S_FAILURE;
+       struct sg_table sgt;
+       int bytes;
+       int i;
+
+       checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
+       if (token->len < checksum.len) {
+               CDEBUG(D_SEC, "Token received too short, expected %d "
+                      "received %d\n", token->len, checksum.len);
+               return GSS_S_DEFECTIVE_TOKEN;
+       }
+
+       OBD_ALLOC_LARGE(checksum.data, checksum.len);
+       if (!checksum.data)
+               return rc;
+
+       desc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
+       if (IS_ERR(desc))
+               goto cleanup;
+
+       req = (struct ahash_request *) desc;
+       for (i = 0; i < msgcnt; i++) {
+               if (!msgs[i].len)
+                       continue;
+
+               rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+               if (rc != 0)
+                       goto hash_cleanup;
+
+               ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+               rc = crypto_ahash_update(req);
+               if (rc) {
+                       gss_teardown_sgtable(&sgt);
+                       goto hash_cleanup;
+               }
+
+               gss_teardown_sgtable(&sgt);
+       }
+
+       for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
+               if (iovs[i].kiov_len == 0)
+                       continue;
+
+               bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
+               iov_bytes -= bytes;
+
+               sg_init_table(sg, 1);
+               sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
+                           iovs[i].kiov_offset);
+               ahash_request_set_crypt(req, sg, NULL, bytes);
+               rc = crypto_ahash_update(req);
+               if (rc)
+                       goto hash_cleanup;
+       }
+
+       rc = cfs_crypto_hash_final(desc, checksum.data, &checksum.len);
+       if (rc)
+               goto cleanup;
+
+       if (memcmp(token->data, checksum.data, checksum.len)) {
+               rc = GSS_S_BAD_SIG;
+               goto cleanup;
+       }
+
+       rc = GSS_S_COMPLETE;
+       goto cleanup;
+
+hash_cleanup:
+       cfs_crypto_hash_final(desc, checksum.data, &checksum.len);
+
+cleanup:
+       OBD_FREE_LARGE(checksum.data, checksum.len);
+
+       return rc;
+}
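The clamping logic in the kiov loop can be isolated: only the bytes actually transferred (bd_nob) feed the digest even though every bulk page is padded out to full blocks. A standalone sketch with made-up page sizes:

#include <stdio.h>

/* Walk page lengths, clamping the running total to nob (bytes actually
 * sent), mirroring the min_t() logic in sk_verify_bulk_hmac(). */
int main(void)
{
	int page_len[] = { 4096, 4096, 4096 }; /* padded kiov lengths */
	int nob = 9000;                        /* bytes actually sent */
	int i;

	for (i = 0; i < 3 && nob > 0; i++) {
		int bytes = page_len[i] < nob ? page_len[i] : nob;

		nob -= bytes;
		printf("hash %d bytes of page %d\n", bytes, i);
	}
	/* hashes 4096, 4096, then only 808 bytes of the last page */
	return 0;
}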
+
 static
 __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
                        int message_count,
@@ -354,9 +483,9 @@ __u32 gss_verify_mic_sk(struct gss_ctx *gss_context,
                        rawobj_t *token)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
-       return sk_verify_checksum(&sk_hmac_types[skc->sc_hmac],
-                                 &skc->sc_shared_key, message_count, messages,
-                                 iov_count, iovs, token);
+
+       return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
+                             message_count, messages, iov_count, iovs, token);
 }
 
 static
@@ -365,39 +494,47 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                    rawobj_t *token)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
-       struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
-       rawobj_t msgbufs[2];
-       rawobj_t cipher;
-       rawobj_t checksum;
+       size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+       struct sk_wire skw;
+       struct sk_hdr skh;
+       rawobj_t msgbufs[3];
+       __u8 local_iv[SK_IV_SIZE];
        unsigned int blocksize;
 
        LASSERT(skc->sc_session_kb.kb_tfm);
-       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
 
+       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
        if (gss_add_padding(message, message_buffer_length, blocksize))
                return GSS_S_FAILURE;
 
-       /* Only encrypting the message data */
-       cipher.data = token->data;
-       cipher.len = token->len - sht->sht_bytes;
-       if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, message,
-                             &cipher, 1))
+       memset(token->data, 0, token->len);
+
+       if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
+               return GSS_S_FAILURE;
+
+       skw.skw_header.data = token->data;
+       skw.skw_header.len = sizeof(skh);
+       memcpy(skw.skw_header.data, &skh, sizeof(skh));
+
+       sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
+       skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
+       skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+       if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
+                             &skw.skw_cipher, 1))
                return GSS_S_FAILURE;
 
-       /* Checksum covers the GSS header followed by the encrypted message */
-       msgbufs[0].len = gss_header->len;
-       msgbufs[0].data = gss_header->data;
-       msgbufs[1].len = cipher.len;
-       msgbufs[1].data = cipher.data;
-
-       LASSERT(cipher.len + sht->sht_bytes <= token->len);
-       checksum.data = token->data + cipher.len;
-       checksum.len = sht->sht_bytes;
-       if (sk_make_checksum(sht->sht_name, &skc->sc_shared_key, 2, msgbufs, 0,
-                            NULL, &checksum))
+       /* HMAC covers the SK header, GSS header, and ciphertext */
+       msgbufs[0] = skw.skw_header;
+       msgbufs[1] = *gss_header;
+       msgbufs[2] = skw.skw_cipher;
+
+       skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
+       skw.skw_hmac.len = sht_bytes;
+       if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
+                        3, msgbufs, 0, NULL, &skw.skw_hmac))
                return GSS_S_FAILURE;
 
-       token->len = cipher.len + checksum.len;
+       token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
 
        return GSS_S_COMPLETE;
 }
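The wrap path implies a formula for the minimum token size: the 16-byte sk_hdr, the message padded to the tfm's block size (1 for a CTR stream mode, making the padding a no-op), and the HMAC digest. A small sketch (sk_block_mask copied from above; digest size supplied by the caller):

#include <stddef.h>

static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	return (len + blocksize - 1) & (~(blocksize - 1));
}

/* Token layout from gss_wrap_sk():
 * sk_hdr (16) | ciphertext (padded message) | HMAC (digest_len). */
static size_t sk_wrap_token_size(size_t msg_len, int blocksize,
				 size_t digest_len)
{
	return 16 + sk_block_mask(msg_len, blocksize) + digest_len;
}
/* e.g. 100-byte message, blocksize 1, SHA-256: 16 + 100 + 32 = 148. */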
@@ -407,40 +544,48 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header,
                      rawobj_t *token, rawobj_t *message)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
-       struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
-       rawobj_t msgbufs[2];
-       rawobj_t cipher;
-       rawobj_t checksum;
+       size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+       struct sk_wire skw;
+       struct sk_hdr *skh;
+       rawobj_t msgbufs[3];
+       __u8 local_iv[SK_IV_SIZE];
        unsigned int blocksize;
        int rc;
 
        LASSERT(skc->sc_session_kb.kb_tfm);
-       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
 
-       if (token->len < sht->sht_bytes)
+       if (token->len < sizeof(struct sk_hdr) + sht_bytes)
                return GSS_S_DEFECTIVE_TOKEN;
 
-       cipher.data = token->data;
-       cipher.len = token->len - sht->sht_bytes;
-       checksum.data = token->data + cipher.len;
-       checksum.len = sht->sht_bytes;
+       skw.skw_header.data = token->data;
+       skw.skw_header.len = sizeof(struct sk_hdr);
+       skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
+       skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+       skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
+       skw.skw_hmac.len = sht_bytes;
 
-       if (cipher.len % blocksize != 0)
+       blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
+       if (skw.skw_cipher.len % blocksize != 0)
                return GSS_S_DEFECTIVE_TOKEN;
 
-       /* Checksum covers the GSS header followed by the encrypted message */
-       msgbufs[0].len = gss_header->len;
-       msgbufs[0].data = gss_header->data;
-       msgbufs[1].len = cipher.len;
-       msgbufs[1].data = cipher.data;
-       rc = sk_verify_checksum(sht, &skc->sc_shared_key, 2, msgbufs, 0, NULL,
-                              &checksum);
+       skh = (struct sk_hdr *)skw.skw_header.data;
+       rc = sk_verify_header(skh);
+       if (rc != GSS_S_COMPLETE)
+               return rc;
+
+       /* HMAC covers the SK header, GSS header, and ciphertext */
+       msgbufs[0] = skw.skw_header;
+       msgbufs[1] = *gss_header;
+       msgbufs[2] = skw.skw_cipher;
+       rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
+                           0, NULL, &skw.skw_hmac);
        if (rc)
                return rc;
 
-       message->len = cipher.len;
-       if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, &cipher,
-                             message, 0))
+       sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
+       message->len = skw.skw_cipher.len;
+       if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv,
+                             1, &skw.skw_cipher, message, 0))
                return GSS_S_FAILURE;
 
        return GSS_S_COMPLETE;
@@ -473,14 +618,13 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context,
        return GSS_S_COMPLETE;
 }
 
-static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
-                            struct ptlrpc_bulk_desc *desc,
-                            rawobj_t *cipher,
+static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
+                            struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
 {
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
-               .info = NULL,
+               .info = iv,
                .flags = 0,
        };
        struct scatterlist ptxt;
@@ -500,8 +644,7 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
                            sk_block_mask(BD_GET_KIOV(desc, i).kiov_len,
                                          blocksize),
                            BD_GET_KIOV(desc, i).kiov_offset);
-               if (adj_nob)
-                       nob += ptxt.length;
+               nob += ptxt.length;
 
                sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page,
                            ptxt.length, ptxt.offset);
@@ -509,8 +652,8 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
                BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset;
                BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length;
 
-               rc = crypto_blkcipher_encrypt(&cdesc, &ctxt, &ptxt,
-                                             ptxt.length);
+               rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
+                                                ptxt.length);
                if (rc) {
                        CERROR("failed to encrypt page: %d\n", rc);
                        return rc;
@@ -523,14 +666,13 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm,
        return 0;
 }
 
-static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
-                            struct ptlrpc_bulk_desc *desc,
-                            rawobj_t *cipher,
+static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv,
+                            struct ptlrpc_bulk_desc *desc, rawobj_t *cipher,
                             int adj_nob)
 {
        struct blkcipher_desc cdesc = {
                .tfm = tfm,
-               .info = NULL,
+               .info = iv,
                .flags = 0,
        };
        struct scatterlist ptxt;
@@ -551,12 +693,13 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
                return GSS_S_DEFECTIVE_TOKEN;
        }
 
-       for (i = 0; i < desc->bd_iov_count; i++) {
+       for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
+            i++) {
                lnet_kiov_t *piov = &BD_GET_KIOV(desc, i);
                lnet_kiov_t *ciov = &BD_GET_ENC_KIOV(desc, i);
 
-               if (piov->kiov_offset % blocksize != 0 ||
-                   piov->kiov_len % blocksize != 0) {
+               if (ciov->kiov_offset % blocksize != 0 ||
+                   ciov->kiov_len % blocksize != 0) {
                        CERROR("Invalid bulk descriptor vector\n");
                        return GSS_S_DEFECTIVE_TOKEN;
                }
@@ -599,8 +742,8 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
                if (piov->kiov_len % blocksize == 0)
                        sg_assign_page(&ptxt, piov->kiov_page);
 
-               rc = crypto_blkcipher_decrypt(&cdesc, &ptxt, &ctxt,
-                                             ctxt.length);
+               rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
+                                                ctxt.length);
                if (rc) {
                        CERROR("Decryption failed for page: %d\n", rc);
                        return GSS_S_FAILURE;
@@ -640,25 +783,36 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm,
 
 static
 __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context,
-                        struct ptlrpc_bulk_desc *desc, rawobj_t *token,
-                        int adj_nob)
+                      struct ptlrpc_bulk_desc *desc, rawobj_t *token,
+                      int adj_nob)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
-       struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
-       rawobj_t cipher = RAWOBJ_EMPTY;
-       rawobj_t checksum = RAWOBJ_EMPTY;
+       size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+       struct sk_wire skw;
+       struct sk_hdr skh;
+       __u8 local_iv[SK_IV_SIZE];
 
-       cipher.data = token->data;
-       cipher.len = token->len - sht->sht_bytes;
+       LASSERT(skc->sc_session_kb.kb_tfm);
 
-       if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob))
+       memset(token->data, 0, token->len);
+       if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE)
                return GSS_S_FAILURE;
 
-       checksum.data = token->data + cipher.len;
-       checksum.len = sht->sht_bytes;
+       skw.skw_header.data = token->data;
+       skw.skw_header.len = sizeof(skh);
+       memcpy(skw.skw_header.data, &skh, sizeof(skh));
 
-       if (sk_make_checksum(sht->sht_name, &skc->sc_shared_key, 1, &cipher, 0,
-                            NULL, &checksum))
+       sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
+       skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
+       skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+       if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
+                           desc, &skw.skw_cipher, adj_nob))
+               return GSS_S_FAILURE;
+
+       skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
+       skw.skw_hmac.len = sht_bytes;
+       if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
+                        desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
                return GSS_S_FAILURE;
 
        return GSS_S_COMPLETE;
@@ -670,23 +824,39 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context,
                           rawobj_t *token, int adj_nob)
 {
        struct sk_ctx *skc = gss_context->internal_ctx_id;
-       struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
-       rawobj_t cipher = RAWOBJ_EMPTY;
-       rawobj_t checksum = RAWOBJ_EMPTY;
+       size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+       struct sk_wire skw;
+       struct sk_hdr *skh;
+       __u8 local_iv[SK_IV_SIZE];
        int rc;
 
-       cipher.data = token->data;
-       cipher.len = token->len - sht->sht_bytes;
-       checksum.data = token->data + cipher.len;
-       checksum.len = sht->sht_bytes;
+       LASSERT(skc->sc_session_kb.kb_tfm);
+
+       if (token->len < sizeof(struct sk_hdr) + sht_bytes)
+               return GSS_S_DEFECTIVE_TOKEN;
+
+       skw.skw_header.data = token->data;
+       skw.skw_header.len = sizeof(struct sk_hdr);
+       skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
+       skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+       skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
+       skw.skw_hmac.len = sht_bytes;
 
-       rc = sk_verify_checksum(&sk_hmac_types[skc->sc_hmac],
-                               &skc->sc_shared_key, 1, &cipher, 0, NULL,
-                               &checksum);
+       skh = (struct sk_hdr *)skw.skw_header.data;
+       rc = sk_verify_header(skh);
+       if (rc != GSS_S_COMPLETE)
+               return rc;
+
+       rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
+                                &skw.skw_cipher, desc->bd_iov_count,
+                                GET_ENC_KIOV(desc), desc->bd_nob,
+                                &skw.skw_hmac);
        if (rc)
                return rc;
 
-       rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob);
+       sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv);
+       rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
+                            desc, &skw.skw_cipher, adj_nob);
        if (rc)
                return rc;
 
@@ -697,8 +867,7 @@ static
 void gss_delete_sec_context_sk(void *internal_context)
 {
        struct sk_ctx *sk_context = internal_context;
-       delete_sk_context(sk_context);
-       OBD_FREE_PTR(sk_context);
+       sk_delete_context(sk_context);
 }
 
 int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize)
@@ -723,6 +892,18 @@ static struct gss_api_ops gss_sk_ops = {
 
 static struct subflavor_desc gss_sk_sfs[] = {
        {
+               .sf_subflavor   = SPTLRPC_SUBFLVR_SKN,
+               .sf_qop         = 0,
+               .sf_service     = SPTLRPC_SVC_NULL,
+               .sf_name        = "skn"
+       },
+       {
+               .sf_subflavor   = SPTLRPC_SUBFLVR_SKA,
+               .sf_qop         = 0,
+               .sf_service     = SPTLRPC_SVC_AUTH,
+               .sf_name        = "ska"
+       },
+       {
                .sf_subflavor   = SPTLRPC_SUBFLVR_SKI,
                .sf_qop         = 0,
                .sf_service     = SPTLRPC_SVC_INTG,
@@ -736,18 +917,15 @@ static struct subflavor_desc gss_sk_sfs[] = {
        },
 };
 
-/*
- * currently we leave module owner NULL
- */
 static struct gss_api_mech gss_sk_mech = {
-       .gm_owner       = NULL, /*THIS_MODULE, */
+       /* .gm_owner uses default NULL value for THIS_MODULE */
        .gm_name        = "sk",
        .gm_oid         = (rawobj_t) {
-               12,
-               "\053\006\001\004\001\311\146\215\126\001\000\001",
+               .len = 12,
+               .data = "\053\006\001\004\001\311\146\215\126\001\000\001",
        },
        .gm_ops         = &gss_sk_ops,
-       .gm_sf_num      = 2,
+       .gm_sf_num      = 4,
        .gm_sfs         = gss_sk_sfs,
 };