Whamcloud - gitweb
branch: HEAD
author ericm <ericm>
Wed, 15 Jul 2009 22:31:18 +0000 (22:31 +0000)
committer ericm <ericm>
Wed, 15 Jul 2009 22:31:18 +0000 (22:31 +0000)
fix incorrect adjustment of data size in privacy mode; a little cleanup
of ctx wake-up handling.
b=20022
r=fanyong
r=wangdi

lustre/ptlrpc/gss/gss_api.h
lustre/ptlrpc/gss/gss_bulk.c
lustre/ptlrpc/gss/gss_keyring.c
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_mech_switch.c
lustre/ptlrpc/gss/sec_gss.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c
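
The core of the change: lgss_unwrap_bulk() and the gss_unwrap_bulk mech op gain an adj_nob flag, set to 1 on the client bulk-read path (gss_cli_ctx_unwrap_bulk) and 0 on the server bulk-write path (gss_svc_unwrap_bulk). A minimal standalone sketch of that calling convention follows; the struct and the unwrap stub are simplified stand-ins for ptlrpc_bulk_desc and lgss_unwrap_bulk, not the Lustre code, and the byte counts are made up.

#include <stdio.h>

/* simplified stand-in for struct ptlrpc_bulk_desc */
struct bulk_desc {
        int bd_nob;             /* plain text size expected  */
        int bd_nob_transferred; /* cipher text size received */
};

/* stand-in for lgss_unwrap_bulk(): adj_nob tells the mech whether it may
 * shrink the descriptor's sizes down to what was actually decrypted */
static int unwrap_bulk(struct bulk_desc *desc, int adj_nob)
{
        printf("unwrap: nob=%d transferred=%d adj_nob=%d\n",
               desc->bd_nob, desc->bd_nob_transferred, adj_nob);
        return 0;
}

int main(void)
{
        struct bulk_desc rd = { .bd_nob = 8192, .bd_nob_transferred = 8208 };
        struct bulk_desc wr = { .bd_nob = 6000, .bd_nob_transferred = 6016 };

        unwrap_bulk(&rd, 1); /* client read: per-page sizes unknown, let the mech adjust */
        unwrap_bulk(&wr, 0); /* server write: sizes already exact, no adjustment         */
        return 0;
}

When adj_nob is set, the mech is allowed to shrink the descriptor's per-page sizes to what was actually decrypted, as the gss_krb5_mech.c hunks below spell out.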

lustre/ptlrpc/gss/gss_api.h
index 3b20c99..1bc9e10 100644
@@ -83,7 +83,8 @@ __u32 lgss_wrap_bulk(
 __u32 lgss_unwrap_bulk(
                 struct gss_ctx          *gctx,
                 struct ptlrpc_bulk_desc *desc,
-                rawobj_t                *token);
+                rawobj_t                *token,
+                int                      adj_nob);
 __u32 lgss_delete_sec_context(
                 struct gss_ctx         **ctx);
 int lgss_display(
@@ -157,7 +158,8 @@ struct gss_api_ops {
         __u32 (*gss_unwrap_bulk)(
                         struct gss_ctx         *gctx,
                         struct ptlrpc_bulk_desc *desc,
-                        rawobj_t               *token);
+                        rawobj_t               *token,
+                        int                     adj_nob);
         void (*gss_delete_sec_context)(
                         void                   *ctx);
         int  (*gss_display)(
lustre/ptlrpc/gss/gss_bulk.c
index f8723f5..933d8b3 100644
@@ -291,7 +291,8 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                         token.len = lustre_msg_buflen(vmsg, voff) -
                                     sizeof(*bsdr);
 
-                        maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc, &token);
+                        maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
+                                               &token, 1);
                         if (maj != GSS_S_COMPLETE) {
                                 CERROR("failed to decrypt bulk read: %x\n",
                                        maj);
@@ -433,7 +434,7 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
 
                 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
-                                       desc, &token);
+                                       desc, &token, 0);
                 if (maj != GSS_S_COMPLETE) {
                         bsdv->bsd_flags |= BSD_FL_ERR;
                         CERROR("failed decrypt bulk data: %x\n", maj);
lustre/ptlrpc/gss/gss_keyring.c
index 6913e68..a514e2a 100644
@@ -155,7 +155,6 @@ static void ctx_upcall_timeout_kr(unsigned long data)
 
         cli_ctx_expire(ctx);
         key_revoke_locked(key);
-        sptlrpc_cli_ctx_wakeup(ctx);
 }
 
 static
@@ -1289,7 +1288,6 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
         /* don't proceed if already refreshed */
         if (cli_ctx_is_refreshed(ctx)) {
                 CWARN("ctx already done refresh\n");
-                sptlrpc_cli_ctx_wakeup(ctx);
                 RETURN(0);
         }
 
@@ -1363,8 +1361,6 @@ out:
                         set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
         }
 
-        sptlrpc_cli_ctx_wakeup(ctx);
-
         /* let user space think it's a success */
         sptlrpc_cli_ctx_put(ctx, 1);
         RETURN(0);
lustre/ptlrpc/gss/gss_krb5_mech.c
index 7eb0c95..5bd9f08 100644
@@ -984,6 +984,9 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
         RETURN(0);
 }
 
+/*
+ * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
+ */
 static
 int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
                       struct krb5_header *khdr,
@@ -1063,13 +1066,26 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 /*
  * desc->bd_nob_transferred is the size of cipher text received.
  * desc->bd_nob is the target size of plain text supposed to be.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size.
+ * - for client read: we don't know the data size for each page, so
+ *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
+ *   might be smaller, so we need to adjust it according to
+ *   bd_enc_iov[]->kiov_len. this means we DO NOT support the case where
+ *   the server sends odd-size data in any page other than the last one.
+ * - for server write: we know exactly the data size expected for each page,
+ *   thus kiov_len is already accurate and should not be adjusted at all;
+ *   bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len), which
+ *   should have been done by prep_bulk().
  */
 static
 int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                       struct krb5_header *khdr,
                       struct ptlrpc_bulk_desc *desc,
                       rawobj_t *cipher,
-                      rawobj_t *plain)
+                      rawobj_t *plain,
+                      int adj_nob)
 {
         struct blkcipher_desc   ciph_desc;
         __u8                    local_iv[16] = {0};
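
The comment above is easier to see with numbers. Below is a compressed user-space model of the size fix-up step of the new decrypt loop, with the decryption itself, page offsets, and blocksize checks stripped out; kiov and bulk_desc are simplified stand-ins for the Lustre structures, and the page counts and byte totals are made-up example values.

#include <stdio.h>
#include <assert.h>

#define PAGE_SZ 4096
#define NPAGES  3

/* simplified stand-ins for the bd_iov[] / bd_enc_iov[] page vectors */
struct kiov { int kiov_len; };

struct bulk_desc {
        int         bd_iov_count;
        int         bd_nob;             /* plain text size expected  */
        int         bd_nob_transferred; /* cipher text size received */
        struct kiov bd_iov[NPAGES];     /* plain text pages          */
        struct kiov bd_enc_iov[NPAGES]; /* cipher text pages         */
};

/* the per-page size fix-up of the decrypt loop, without the decryption */
static int adjust_sizes(struct bulk_desc *d, int adj_nob)
{
        int ct_nob = 0, pt_nob = 0, i;

        for (i = 0; i < d->bd_iov_count &&
                    ct_nob < d->bd_nob_transferred; i++) {
                if (adj_nob) {
                        /* clamp the cipher page to what was really sent */
                        if (ct_nob + d->bd_enc_iov[i].kiov_len >
                            d->bd_nob_transferred)
                                d->bd_enc_iov[i].kiov_len =
                                        d->bd_nob_transferred - ct_nob;

                        /* the plain page follows it, clamped to bd_nob */
                        d->bd_iov[i].kiov_len = d->bd_enc_iov[i].kiov_len;
                        if (pt_nob + d->bd_iov[i].kiov_len > d->bd_nob)
                                d->bd_iov[i].kiov_len = d->bd_nob - pt_nob;
                } else {
                        /* server write: lengths must already be consistent */
                        assert(ct_nob + d->bd_enc_iov[i].kiov_len <=
                               d->bd_nob_transferred);
                        assert(d->bd_iov[i].kiov_len <=
                               d->bd_enc_iov[i].kiov_len);
                }
                ct_nob += d->bd_enc_iov[i].kiov_len;
                pt_nob += d->bd_iov[i].kiov_len;
        }

        /* pages the transfer never reached are zeroed out */
        if (adj_nob)
                for (; i < d->bd_iov_count; i++)
                        d->bd_iov[i].kiov_len = 0;
        return pt_nob;
}

int main(void)
{
        /* client read: the server really sent 6007 bytes of data (cipher
         * text rounded up to the 16-byte blocksize, 6016), but every plain
         * page was optimistically set to PAGE_SZ up front */
        struct bulk_desc d = {
                .bd_iov_count       = NPAGES,
                .bd_nob             = 6007,
                .bd_nob_transferred = 6016,
                .bd_iov     = { {PAGE_SZ}, {PAGE_SZ}, {PAGE_SZ} },
                .bd_enc_iov = { {PAGE_SZ}, {PAGE_SZ}, {PAGE_SZ} },
        };
        int pt_nob = adjust_sizes(&d, 1);

        /* prints: plain pages 4096 1911 0, total 6007 */
        printf("plain pages %d %d %d, total %d\n",
               d.bd_iov[0].kiov_len, d.bd_iov[1].kiov_len,
               d.bd_iov[2].kiov_len, pt_nob);
        return 0;
}

With adj_nob = 0 (server write) the same loop only asserts that the lengths are already consistent, matching the LASSERTs added in the hunk above.
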
@@ -1104,35 +1120,42 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 return rc;
         }
 
-        /*
-         * decrypt clear pages. note the enc_iov is prepared by prep_bulk()
-         * which already done some sanity checkings.
-         *
-         * desc->bd_nob is the actual plain text size supposed to be
-         * transferred. desc->bd_nob_transferred is the actual cipher
-         * text received.
-         */
         for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
              i++) {
-                if (desc->bd_enc_iov[i].kiov_len == 0)
-                        continue;
+                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
+                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
+                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+                               i, desc->bd_enc_iov[i].kiov_offset,
+                               desc->bd_enc_iov[i].kiov_len, blocksize);
+                        return -EFAULT;
+                }
 
-                if (ct_nob + desc->bd_enc_iov[i].kiov_len >
-                    desc->bd_nob_transferred)
-                        desc->bd_enc_iov[i].kiov_len =
-                                desc->bd_nob_transferred - ct_nob;
+                if (adj_nob) {
+                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
+                            desc->bd_nob_transferred)
+                                desc->bd_enc_iov[i].kiov_len =
+                                        desc->bd_nob_transferred - ct_nob;
 
-                desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
-                if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
-                        desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
+                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
+                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
+                                desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
+                } else {
+                        /* this should be guaranteed by LNET */
+                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
+                                desc->bd_nob_transferred);
+                        LASSERT(desc->bd_iov[i].kiov_len <=
+                                desc->bd_enc_iov[i].kiov_len);
+                }
+
+                if (desc->bd_enc_iov[i].kiov_len == 0)
+                        continue;
 
                 src.page = desc->bd_enc_iov[i].kiov_page;
                 src.offset = desc->bd_enc_iov[i].kiov_offset;
                 src.length = desc->bd_enc_iov[i].kiov_len;
 
                 dst = src;
-
-                if (desc->bd_iov[i].kiov_offset % blocksize == 0)
+                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                         dst.page = desc->bd_iov[i].kiov_page;
 
                 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
@@ -1142,7 +1165,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                         return rc;
                 }
 
-                if (desc->bd_iov[i].kiov_offset % blocksize) {
+                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                         memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
                                desc->bd_iov[i].kiov_offset,
                                cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
@@ -1154,6 +1177,23 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
                 pt_nob += desc->bd_iov[i].kiov_len;
         }
 
+        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+                CERROR("%d cipher text transferred but only %d decrypted\n",
+                       desc->bd_nob_transferred, ct_nob);
+                return -EFAULT;
+        }
+
+        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+                CERROR("%d plain text expected but only %d received\n",
+                       desc->bd_nob, pt_nob);
+                return -EFAULT;
+        }
+
+        /* if needed, clear out the remaining unused iovs */
+        if (adj_nob)
+                while (i < desc->bd_iov_count)
+                        desc->bd_iov[i++].kiov_len = 0;
+
         /* decrypt tail (krb5 header) */
         buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
         buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
@@ -1629,7 +1669,7 @@ out_free:
 static
 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                                struct ptlrpc_bulk_desc *desc,
-                               rawobj_t *token)
+                               rawobj_t *token, int adj_nob)
 {
         struct krb5_ctx     *kctx = gctx->internal_ctx_id;
         struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
@@ -1685,7 +1725,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         plain.len = cipher.len;
 
         rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
-                               desc, &cipher, &plain);
+                               desc, &cipher, &plain, adj_nob);
         if (rc)
                 return GSS_S_DEFECTIVE_TOKEN;
 
lustre/ptlrpc/gss/gss_mech_switch.c
index ca55fe8..f77b509 100644
@@ -312,7 +312,8 @@ __u32 lgss_wrap_bulk(struct gss_ctx *context_handle,
 
 __u32 lgss_unwrap_bulk(struct gss_ctx *context_handle,
                        struct ptlrpc_bulk_desc *desc,
-                       rawobj_t *token)
+                       rawobj_t *token,
+                       int adj_nob)
 {
         LASSERT(context_handle);
         LASSERT(context_handle->mech_type);
@@ -320,7 +321,7 @@ __u32 lgss_unwrap_bulk(struct gss_ctx *context_handle,
         LASSERT(context_handle->mech_type->gm_ops->gss_unwrap_bulk);
 
         return context_handle->mech_type->gm_ops
-                ->gss_unwrap_bulk(context_handle, desc, token);
+                ->gss_unwrap_bulk(context_handle, desc, token, adj_nob);
 }
 
 /* gss_delete_sec_context: free all resources associated with context_handle.
lustre/ptlrpc/gss/sec_gss.c
index 35dd68c..1556e58 100644
@@ -339,6 +339,7 @@ int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
                       ctx->cc_expire == 0 ? 0 :
                       cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
 
+                sptlrpc_cli_ctx_wakeup(ctx);
                 return 1;
         }
 
@@ -403,6 +404,8 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
                         gss_sec_install_rctx(ctx->cc_sec->ps_import,
                                              ctx->cc_sec, ctx);
         }
+
+        sptlrpc_cli_ctx_wakeup(ctx);
 }
 
 static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
@@ -1447,13 +1450,12 @@ void gss_free_reqbuf(struct ptlrpc_sec *sec,
         LASSERT(privacy);
         LASSERT(req->rq_clrbuf_len);
 
-        if (req->rq_pool &&
-            req->rq_clrbuf >= req->rq_reqbuf &&
-            (char *) req->rq_clrbuf <
+        if (req->rq_pool == NULL ||
+            req->rq_clrbuf < req->rq_reqbuf ||
+            (char *) req->rq_clrbuf >=
             (char *) req->rq_reqbuf + req->rq_reqbuf_len)
-                goto release_reqbuf;
+                OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
 
-        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
         req->rq_clrbuf = NULL;
         req->rq_clrbuf_len = 0;
 
lustre/ptlrpc/sec.c
index beaf09d..bcb4ea6 100644
@@ -2221,9 +2221,14 @@ int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
 
         LASSERT(req->rq_bulk_write);
 
-        if (desc->bd_nob_transferred != desc->bd_nob &&
-            SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
-            SPTLRPC_BULK_SVC_PRIV) {
+        /*
+         * in privacy mode, transferred should be >= expected; otherwise
+         * transferred should be == expected.
+         */
+        if (desc->bd_nob_transferred < desc->bd_nob ||
+            (desc->bd_nob_transferred > desc->bd_nob &&
+             SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
+             SPTLRPC_BULK_SVC_PRIV)) {
                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
                           desc->bd_nob_transferred, desc->bd_nob);
                 return -ETIMEDOUT;
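
The new condition reads as a small predicate: a shortfall is always an error, and an excess is only tolerated in privacy mode, where the cipher text is rounded up and so can exceed the plain text size. A standalone illustration (the function name and example byte counts are made up for this sketch):

#include <stdio.h>

/* returns 1 if the transferred byte count is acceptable for a bulk write:
 * in privacy mode the cipher text is rounded up, so transferred >= expected
 * is fine; in any other mode the two must match exactly */
static int bulk_write_size_ok(int transferred, int expected, int privacy)
{
        if (transferred < expected)
                return 0;
        if (transferred > expected && !privacy)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", bulk_write_size_ok(8192, 8192, 0)); /* 1: exact match    */
        printf("%d\n", bulk_write_size_ok(8208, 8192, 1)); /* 1: rounded cipher */
        printf("%d\n", bulk_write_size_ok(8208, 8192, 0)); /* 0: unexpected     */
        printf("%d\n", bulk_write_size_ok(8000, 8192, 1)); /* 0: truncated      */
        return 0;
}

The old check only rejected a mismatch outside privacy mode, so a short transfer in privacy mode slipped through.
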
lustre/ptlrpc/sec_bulk.c
index c09cf0c..9656d39 100644
@@ -206,7 +206,7 @@ static void enc_pools_release_free_pages(long npages)
         page_pools.epp_total_pages -= npages;
 
         /* max pool index after the release */
-        p_idx_max1 = page_pools.epp_total_pages == 0 ? 0 :
+        p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
                      ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);
 
         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
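
On the last hunk: p_idx_max1 is the index of the last pool that still holds pages after the release, and the code that follows (not shown here) presumably frees every pool above that index. When epp_total_pages drops to 0 there is no such pool, so the old value of 0 wrongly kept pool 0 alive; -1 lets it be released as well. A small illustration of just the index arithmetic (the PAGES_PER_POOL value is an assumption for the example):

#include <stdio.h>

#define PAGES_PER_POOL 512  /* example value, assumed for illustration */

/* index of the last pool still needed to hold `total` pages,
 * or -1 when nothing is left (the fix in the hunk above) */
static int last_pool_index(long total)
{
        return total == 0 ? -1 : (int)((total - 1) / PAGES_PER_POOL);
}

int main(void)
{
        printf("%d\n", last_pool_index(1024)); /* 1: pools 0 and 1 in use     */
        printf("%d\n", last_pool_index(512));  /* 0: only pool 0 in use       */
        printf("%d\n", last_pool_index(1));    /* 0: only pool 0 in use       */
        printf("%d\n", last_pool_index(0));    /* -1: none; old code said 0   */
        return 0;
}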