From: Eric Mei Date: Tue, 7 Sep 2010 13:40:53 +0000 (+0400) Subject: b=23728 Doxygen style comment - sptlrpc api. X-Git-Tag: 2.0.52.0~22 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=1f0e2723f37eb86f3a88807d1200e78c10ab9565;hp=-c b=23728 Doxygen style comment - sptlrpc api. --- 1f0e2723f37eb86f3a88807d1200e78c10ab9565 diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index aebfb8b..8df763a 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -558,11 +558,11 @@ struct ptlrpc_request { /** * security and encryption data * @{ */ - struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */ - struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */ - cfs_list_t rq_ctx_chain; /* link to waited ctx */ + struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ + struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ + cfs_list_t rq_ctx_chain; /**< link to waited ctx */ - struct sptlrpc_flavor rq_flvr; /* client & server */ + struct sptlrpc_flavor rq_flvr; /**< for client & server */ enum lustre_sec_part rq_sp_from; unsigned long /* client/server security flags */ @@ -588,8 +588,6 @@ struct ptlrpc_request { /* (server side), pointed directly into req buffer */ struct ptlrpc_user_desc *rq_user_desc; - /** @} */ - /** early replies go to offset 0, regular replies go after that */ unsigned int rq_reply_off; @@ -605,6 +603,8 @@ struct ptlrpc_request { int rq_clrbuf_len; /* only in priv mode */ int rq_clrdata_len; /* only in priv mode */ + /** @} */ + /** Fields that help to see if request and reply were swabbed or not */ __u32 rq_req_swab_mask; __u32 rq_rep_swab_mask; diff --git a/lustre/include/lustre_sec.h b/lustre/include/lustre_sec.h index 7158d6a..6a0da67 100644 --- a/lustre/include/lustre_sec.h +++ b/lustre/include/lustre_sec.h @@ -37,7 +37,7 @@ #ifndef _LUSTRE_SEC_H_ #define _LUSTRE_SEC_H_ -/** \defgroup sec sec +/** \defgroup sptlrpc sptlrpc * * @{ */ @@ -45,13 +45,14 @@ /* * to avoid include */ -struct key; struct obd_import; struct obd_export; struct ptlrpc_request; struct ptlrpc_reply_state; struct ptlrpc_bulk_desc; struct brw_page; +/* Linux specific */ +struct key; struct seq_file; /* @@ -65,6 +66,20 @@ struct ptlrpc_svc_ctx; struct ptlrpc_cli_ctx; struct ptlrpc_ctx_ops; +/** + * \addtogroup flavor flavor + * + * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits + * are unused, must be set to 0 for future expansion. + *
+ * ------------------------------------------------------------------------
+ * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech)  | 4b (policy) |
+ * ------------------------------------------------------------------------
+ *
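[Editor's illustration of the layout above.] The field boundaries in this diagram line up with the FLVR_*_OFFSET constants defined further down in this hunk (FLVR_POLICY_OFFSET is 0 and FLVR_MECH_OFFSET is 4; the remaining offsets follow the same 4-bit spacing shown in the diagram), and WIRE_FLVR() masks off the unused high 12 bits. A rough user-space sketch of that packing, for illustration only -- the toy_* names are invented for this example, and the sample field values are arbitrary except for svc = 2 (SPTLRPC_SVC_INTG) and the zero bulk defaults, which this patch defines:

#include <stdint.h>
#include <stdio.h>

/* Illustration only -- not Lustre code.  Offsets mirror the 4-bit layout
 * in the diagram above; TOY_* and toy_make_flvr() are invented names. */
#define TOY_FLVR_POLICY_OFFSET      0   /* FLVR_POLICY_OFFSET in the patch */
#define TOY_FLVR_MECH_OFFSET        4   /* FLVR_MECH_OFFSET in the patch   */
#define TOY_FLVR_SVC_OFFSET         8
#define TOY_FLVR_BULK_TYPE_OFFSET  12
#define TOY_FLVR_BULK_SVC_OFFSET   16

/* keep only the meaningful low 20 bits, as WIRE_FLVR() does */
#define TOY_WIRE_FLVR(w)           ((uint32_t)(w) & 0x000FFFFF)

static uint32_t toy_make_flvr(uint32_t policy, uint32_t mech, uint32_t svc,
                              uint32_t btype, uint32_t bsvc)
{
        return (policy << TOY_FLVR_POLICY_OFFSET) |
               (mech   << TOY_FLVR_MECH_OFFSET)   |
               (svc    << TOY_FLVR_SVC_OFFSET)    |
               (btype  << TOY_FLVR_BULK_TYPE_OFFSET) |
               (bsvc   << TOY_FLVR_BULK_SVC_OFFSET);
}

int main(void)
{
        /* arbitrary policy/mech values; svc = 2 is SPTLRPC_SVC_INTG,
         * bulk type/svc left at their 0 (default/null) values */
        uint32_t flvr = toy_make_flvr(2, 1, 2, 0, 0);

        printf("wire flavor = 0x%08x\n", (unsigned)TOY_WIRE_FLVR(flvr));
        printf("policy=%u mech=%u svc=%u btype=%u bsvc=%u\n",
               (unsigned)((flvr >> TOY_FLVR_POLICY_OFFSET) & 0xf),
               (unsigned)((flvr >> TOY_FLVR_MECH_OFFSET) & 0xf),
               (unsigned)((flvr >> TOY_FLVR_SVC_OFFSET) & 0xf),
               (unsigned)((flvr >> TOY_FLVR_BULK_TYPE_OFFSET) & 0xf),
               (unsigned)((flvr >> TOY_FLVR_BULK_SVC_OFFSET) & 0xf));
        return 0;
}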
+ * + * @{ + */ + /* * flavor constants */ @@ -92,32 +107,29 @@ enum sptlrpc_mech_gss { }; enum sptlrpc_service_type { - SPTLRPC_SVC_NULL = 0, /* no security */ - SPTLRPC_SVC_AUTH = 1, /* auth only */ - SPTLRPC_SVC_INTG = 2, /* integrity */ - SPTLRPC_SVC_PRIV = 3, /* privacy */ + SPTLRPC_SVC_NULL = 0, /**< no security */ + SPTLRPC_SVC_AUTH = 1, /**< authentication only */ + SPTLRPC_SVC_INTG = 2, /**< integrity */ + SPTLRPC_SVC_PRIV = 3, /**< privacy */ SPTLRPC_SVC_MAX, }; enum sptlrpc_bulk_type { - SPTLRPC_BULK_DEFAULT = 0, /* follow rpc flavor */ - SPTLRPC_BULK_HASH = 1, /* hash integrity */ + SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */ + SPTLRPC_BULK_HASH = 1, /**< hash integrity */ SPTLRPC_BULK_MAX, }; enum sptlrpc_bulk_service { - SPTLRPC_BULK_SVC_NULL = 0, - SPTLRPC_BULK_SVC_AUTH = 1, - SPTLRPC_BULK_SVC_INTG = 2, - SPTLRPC_BULK_SVC_PRIV = 3, + SPTLRPC_BULK_SVC_NULL = 0, /**< no security */ + SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */ + SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */ + SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */ SPTLRPC_BULK_SVC_MAX, }; /* - * rpc flavor compose/extract, represented as 32 bits. currently the - * high 12 bits are unused, must be set as 0. - * - * 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) + * compose/extract macros */ #define FLVR_POLICY_OFFSET (0) #define FLVR_MECH_OFFSET (4) @@ -212,11 +224,13 @@ enum sptlrpc_bulk_service { #define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF) #define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000) -/* +/** * extract the useful part from wire flavor */ #define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF) +/** @} flavor */ + static inline void flvr_set_svc(__u32 *flvr, __u32 svc) { LASSERT(svc < SPTLRPC_SVC_MAX); @@ -241,16 +255,26 @@ struct bulk_spec_hash { __u8 hash_alg; }; +/** + * Full description of flavors being used on a ptlrpc connection, include + * both regular RPC and bulk transfer parts. + */ struct sptlrpc_flavor { - __u32 sf_rpc; /* wire flavor - should be renamed to sf_wire */ - __u32 sf_flags; /* general flags */ - /* + /** + * wire flavor, should be renamed to sf_wire. + */ + __u32 sf_rpc; + /** + * general flags of PTLRPC_SEC_FL_* + */ + __u32 sf_flags; + /** * rpc flavor specification */ union { /* nothing for now */ } u_rpc; - /* + /** * bulk flavor specification */ union { @@ -258,6 +282,10 @@ struct sptlrpc_flavor { } u_bulk; }; +/** + * identify the RPC is generated from what part of Lustre. It's encoded into + * RPC requests and to be checked by ptlrpc service. + */ enum lustre_sec_part { LUSTRE_SP_CLI = 0, LUSTRE_SP_MDT, @@ -270,6 +298,10 @@ enum lustre_sec_part { const char *sptlrpc_part2name(enum lustre_sec_part sp); enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd); +/** + * A rule specifies a flavor to be used by a ptlrpc connection between + * two Lustre parts. + */ struct sptlrpc_rule { __u32 sr_netid; /* LNET network ID */ __u8 sr_from; /* sec_part */ @@ -278,6 +310,12 @@ struct sptlrpc_rule { struct sptlrpc_flavor sr_flvr; }; +/** + * A set of rules in memory. + * + * Rules are generated and stored on MGS, and propagated to MDT, OST, + * and client when needed. + */ struct sptlrpc_rule_set { int srs_nslot; int srs_nrule; @@ -330,31 +368,119 @@ struct vfs_cred { }; struct ptlrpc_ctx_ops { + /** + * To determine whether it's suitable to use the \a ctx for \a vcred. + */ int (*match) (struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred); + + /** + * To bring the \a ctx uptodate. 
+ */ int (*refresh) (struct ptlrpc_cli_ctx *ctx); + + /** + * Validate the \a ctx. + */ int (*validate) (struct ptlrpc_cli_ctx *ctx); + + /** + * Force the \a ctx to die. + */ void (*die) (struct ptlrpc_cli_ctx *ctx, int grace); int (*display) (struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize); - /* - * rpc data transform + + /** + * Sign the request message using \a ctx. + * + * \pre req->rq_reqmsg point to request message. + * \pre req->rq_reqlen is the request message length. + * \post req->rq_reqbuf point to request message with signature. + * \post req->rq_reqdata_len is set to the final request message size. + * + * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign(). */ int (*sign) (struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); + + /** + * Verify the reply message using \a ctx. + * + * \pre req->rq_repdata point to reply message with signature. + * \pre req->rq_repdata_len is the total reply message length. + * \post req->rq_repmsg point to reply message without signature. + * \post req->rq_replen is the reply message length. + * + * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify(). + */ int (*verify) (struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); + + /** + * Encrypt the request message using \a ctx. + * + * \pre req->rq_reqmsg point to request message in clear text. + * \pre req->rq_reqlen is the request message length. + * \post req->rq_reqbuf point to request message. + * \post req->rq_reqdata_len is set to the final request message size. + * + * \see gss_cli_ctx_seal(). + */ int (*seal) (struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); + + /** + * Decrypt the reply message using \a ctx. + * + * \pre req->rq_repdata point to encrypted reply message. + * \pre req->rq_repdata_len is the total cipher text length. + * \post req->rq_repmsg point to reply message in clear text. + * \post req->rq_replen is the reply message length in clear text. + * + * \see gss_cli_ctx_unseal(). + */ int (*unseal) (struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); - /* - * bulk transform + + /** + * Wrap bulk request data. This is called before wrapping RPC + * request message. + * + * \pre bulk buffer is descripted by desc->bd_iov and + * desc->bd_iov_count. note for read it's just buffer, no data + * need to be sent; for write it contains data in clear text. + * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared + * (usually inside of RPC request message). + * - encryption: cipher text bulk buffer is descripted by + * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov + * count remains the same). + * - otherwise: bulk buffer is still desc->bd_iov and + * desc->bd_iov_count. + * + * \return 0: success. + * \return -ev: error code. + * + * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk(). */ int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc); + + /** + * Unwrap bulk reply data. This is called after wrapping RPC + * reply message. + * + * \pre bulk buffer is descripted by desc->bd_iov/desc->bd_enc_iov and + * desc->bd_iov_count, according to wrap_bulk(). + * \post final bulk data in clear text is placed in buffer described + * by desc->bd_iov and desc->bd_iov_count. + * \return +ve nob of actual bulk data in clear text. + * \return -ve error code. + * + * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk(). 
+ */ int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc); @@ -393,57 +519,165 @@ struct ptlrpc_cli_ctx { cfs_list_t cc_gc_chain; /* linked to gc chain */ }; +/** + * client side policy operation vector. + */ struct ptlrpc_sec_cops { - /* - * ptlrpc_sec constructor/destructor + /** + * Given an \a imp, create and initialize a ptlrpc_sec structure. + * \param ctx service context: + * - regular import: \a ctx should be NULL; + * - reverse import: \a ctx is obtained from incoming request. + * \param flavor specify what flavor to use. + * + * When necessary, policy module is responsible for taking reference + * on the import. + * + * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr(). */ struct ptlrpc_sec * (*create_sec) (struct obd_import *imp, struct ptlrpc_svc_ctx *ctx, struct sptlrpc_flavor *flavor); + + /** + * Destructor of ptlrpc_sec. When called, refcount has been dropped + * to 0 and all contexts has been destroyed. + * + * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr(). + */ void (*destroy_sec) (struct ptlrpc_sec *sec); - /* - * notify to-be-dead + /** + * Notify that this ptlrpc_sec is going to die. Optionally, policy + * module is supposed to set sec->ps_dying and whatever necessary + * actions. + * + * \see plain_kill_sec(), gss_sec_kill(). */ void (*kill_sec) (struct ptlrpc_sec *sec); - /* - * context + /** + * Given \a vcred, lookup and/or create its context. The policy module + * is supposed to maintain its own context cache. + * XXX currently \a create and \a remove_dead is always 1, perhaps + * should be removed completely. + * + * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr(). */ struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec, struct vfs_cred *vcred, int create, int remove_dead); + + /** + * Called then the reference of \a ctx dropped to 0. The policy module + * is supposed to destroy this context or whatever else according to + * its cache maintainance mechamism. + * + * \param sync if zero, we shouldn't wait for the context being + * destroyed completely. + * + * \see plain_release_ctx(), gss_sec_release_ctx_kr(). + */ void (*release_ctx) (struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, int sync); + + /** + * Flush the context cache. + * + * \param uid context of which user, -1 means all contexts. + * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected + * contexts should be cleared immediately. + * \param force if zero, only idle contexts will be flushed. + * + * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr(). + */ int (*flush_ctx_cache) (struct ptlrpc_sec *sec, uid_t uid, int grace, int force); + + /** + * Called periodically by garbage collector to remove dead contexts + * from cache. + * + * \see gss_sec_gc_ctx_kr(). + */ void (*gc_ctx) (struct ptlrpc_sec *sec); - /* - * reverse context + /** + * Given an context \a ctx, install a corresponding reverse service + * context on client side. + * XXX currently it's only used by GSS module, maybe we should remove + * this from general API. */ int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx); - /* - * request/reply buffer manipulation + /** + * To allocate request buffer for \a req. + * + * \pre req->rq_reqmsg == NULL. + * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated, + * we are not supposed to free it. + * \post if success, req->rq_reqmsg point to a buffer with size + * at least \a lustre_msg_size. 
+ * + * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf(). */ int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int lustre_msg_size); + + /** + * To free request buffer for \a req. + * + * \pre req->rq_reqbuf != NULL. + * + * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf(). + */ void (*free_reqbuf) (struct ptlrpc_sec *sec, struct ptlrpc_request *req); + + /** + * To allocate reply buffer for \a req. + * + * \pre req->rq_repbuf == NULL. + * \post if success, req->rq_repbuf point to a buffer with size + * req->rq_repbuf_len, the size should be large enough to receive + * reply which be transformed from \a lustre_msg_size of clear text. + * + * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf(). + */ int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int lustre_msg_size); + + /** + * To free reply buffer for \a req. + * + * \pre req->rq_repbuf != NULL. + * \post req->rq_repbuf == NULL. + * \post req->rq_repbuf_len == 0. + * + * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf(). + */ void (*free_repbuf) (struct ptlrpc_sec *sec, struct ptlrpc_request *req); + + /** + * To expand the request buffer of \a req, thus the \a segment in + * the request message pointed by req->rq_reqmsg can accommodate + * at least \a newsize of data. + * + * \pre req->rq_reqmsg->lm_buflens[segment] < newsize. + * + * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(), + * gss_enlarge_reqbuf(). + */ int (*enlarge_reqbuf) (struct ptlrpc_sec *sec, struct ptlrpc_request *req, @@ -455,24 +689,108 @@ struct ptlrpc_sec_cops { struct seq_file *seq); }; +/** + * server side policy operation vector. + */ struct ptlrpc_sec_sops { + /** + * verify an incoming request. + * + * \pre request message is pointed by req->rq_reqbuf, size is + * req->rq_reqdata_len; and the message has been unpacked to + * host byte order. + * + * \retval SECSVC_OK success, req->rq_reqmsg point to request message + * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set; + * req->rq_sp_from is decoded from request. + * \retval SECSVC_COMPLETE success, the request has been fully + * processed, and reply message has been prepared; req->rq_sp_from is + * decoded from request. + * \retval SECSVC_DROP failed, this request should be dropped. + * + * \see null_accept(), plain_accept(), gss_svc_accept_kr(). + */ int (*accept) (struct ptlrpc_request *req); + + /** + * Perform security transformation upon reply message. + * + * \pre reply message is pointed by req->rq_reply_state->rs_msg, size + * is req->rq_replen. + * \post req->rs_repdata_len is the final message size. + * \post req->rq_reply_off is set. + * + * \see null_authorize(), plain_authorize(), gss_svc_authorize(). + */ int (*authorize) (struct ptlrpc_request *req); + + /** + * Invalidate server context \a ctx. + * + * \see gss_svc_invalidate_ctx(). + */ void (*invalidate_ctx) (struct ptlrpc_svc_ctx *ctx); - /* buffer manipulation */ + + /** + * Allocate a ptlrpc_reply_state. + * + * \param msgsize size of the reply message in clear text. + * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we + * should simply use it; otherwise we'll responsible for allocating + * a new one. + * \post req->rq_reply_state != NULL; + * \post req->rq_reply_state->rs_msg != NULL; + * + * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs(). + */ int (*alloc_rs) (struct ptlrpc_request *req, int msgsize); + + /** + * Free a ptlrpc_reply_state. 
+ */ void (*free_rs) (struct ptlrpc_reply_state *rs); + + /** + * Release the server context \a ctx. + * + * \see gss_svc_free_ctx(). + */ void (*free_ctx) (struct ptlrpc_svc_ctx *ctx); - /* reverse context */ + + /** + * Install a reverse context based on the server context \a ctx. + * + * \see gss_svc_install_rctx_kr(). + */ int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx); - /* bulk transform */ + + /** + * Prepare buffer for incoming bulk write. + * + * \pre desc->bd_iov and desc->bd_iov_count describes the buffer + * intended to receive the write. + * + * \see gss_svc_prep_bulk(). + */ int (*prep_bulk) (struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc); + + /** + * Unwrap the bulk write data. + * + * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk(). + */ int (*unwrap_bulk) (struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc); + + /** + * Wrap the bulk read data. + * + * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk(). + */ int (*wrap_bulk) (struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc); }; @@ -491,16 +809,28 @@ struct ptlrpc_sec_policy { #define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */ #define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */ +/** + * The ptlrpc_sec represents the client side ptlrpc security facilities, + * each obd_import (both regular and reverse import) must associate with + * a ptlrpc_sec. + * + * \see sptlrpc_import_sec_adapt(). + */ struct ptlrpc_sec { struct ptlrpc_sec_policy *ps_policy; cfs_atomic_t ps_refcount; - cfs_atomic_t ps_nctx; /* statistic only */ - int ps_id; /* unique identifier */ - struct sptlrpc_flavor ps_flvr; /* flavor */ + /** statistic only */ + cfs_atomic_t ps_nctx; + /** unique identifier */ + int ps_id; + struct sptlrpc_flavor ps_flvr; enum lustre_sec_part ps_part; + /** after set, no more new context will be created */ unsigned int ps_dying:1; - struct obd_import *ps_import; /* owning import */ - cfs_spinlock_t ps_lock; /* protect ccache */ + /** owning import */ + struct obd_import *ps_import; + cfs_spinlock_t ps_lock; + /* * garbage collection */ @@ -608,7 +938,7 @@ void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg, int segment, int newsize); /* - * security type + * security policies */ int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy); int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy); @@ -823,6 +1153,6 @@ enum { LUSTRE_SEC_ALL = 3 }; -/** @} sec */ +/** @} sptlrpc */ #endif /* _LUSTRE_SEC_H_ */ diff --git a/lustre/ptlrpc/gss/gss_keyring.c b/lustre/ptlrpc/gss/gss_keyring.c index 1aa2f6f..4cd78d7 100644 --- a/lustre/ptlrpc/gss/gss_keyring.c +++ b/lustre/ptlrpc/gss/gss_keyring.c @@ -925,8 +925,7 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, static int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec, - uid_t uid, - int grace, int force) + uid_t uid, int grace, int force) { ENTRY; diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c index be2322c..2a32e3c 100644 --- a/lustre/ptlrpc/gss/sec_gss.c +++ b/lustre/ptlrpc/gss/sec_gss.c @@ -1478,8 +1478,6 @@ release_reqbuf: req->rq_reqbuf_len = 0; } - req->rq_reqmsg = NULL; - EXIT; } @@ -1612,8 +1610,6 @@ void gss_free_repbuf(struct ptlrpc_sec *sec, req->rq_repbuf_len = 0; req->rq_repdata = NULL; req->rq_repdata_len = 0; - - req->rq_repmsg = NULL; } static int get_enlarged_msgsize(struct lustre_msg *msg, diff --git a/lustre/ptlrpc/sec.c b/lustre/ptlrpc/sec.c index 86c5a99..bf7db85 100644 --- a/lustre/ptlrpc/sec.c +++ b/lustre/ptlrpc/sec.c @@ -307,9 +307,10 @@ void 
sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync) } EXPORT_SYMBOL(sptlrpc_cli_ctx_put); -/* - * expire the context immediately. - * the caller must hold at least 1 ref on the ctx. +/** + * Expire the client context immediately. + * + * \pre Caller must hold at least 1 reference on the \a ctx. */ void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx) { @@ -318,6 +319,10 @@ void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx) } EXPORT_SYMBOL(sptlrpc_cli_ctx_expire); +/** + * To wake up the threads who are waiting for this client context. Called + * after some status change happened on \a ctx. + */ void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx) { struct ptlrpc_request *req, *next; @@ -388,6 +393,13 @@ static int import_sec_validate_get(struct obd_import *imp, return 0; } +/** + * Given a \a req, find or allocate a appropriate context for it. + * \pre req->rq_cli_ctx == NULL. + * + * \retval 0 succeed, and req->rq_cli_ctx is set. + * \retval -ev error number, and req->rq_cli_ctx == NULL. + */ int sptlrpc_req_get_ctx(struct ptlrpc_request *req) { struct obd_import *imp = req->rq_import; @@ -414,9 +426,14 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req) RETURN(0); } -/* - * if @sync == 0, this function should return quickly without sleep; - * otherwise might trigger ctx destroying rpc to server. +/** + * Drop the context for \a req. + * \pre req->rq_cli_ctx != NULL. + * \post req->rq_cli_ctx == NULL. + * + * If \a sync == 0, this function should return quickly without sleep; + * otherwise it might trigger and wait for the whole process of sending + * an context-destroying rpc to server. */ void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync) { @@ -497,12 +514,12 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, } /** - * if current context has died, or if we resend after flavor switched, - * call this func to switch context. if no switch is needed, request - * will end up with the same context. + * If current context of \a req is dead somehow, e.g. we just switched flavor + * thus marked original contexts dead, we'll find a new context for it. if + * no switch is needed, \a req will end up with the same context. * - * request must have a context. in any case of failure, restore the - * restore the old one - a request must have a context. + * \note a request must have a context, to keep other parts of code happy. + * In any case of failure during the switching, we must restore the old one. */ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req) { @@ -607,17 +624,19 @@ void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx) cfs_spin_unlock(&ctx->cc_lock); } -/* - * the status of context could be subject to be changed by other threads at any - * time. we allow this race. but once we return with 0, the caller will - * suppose it's uptodated and keep using it until the owning rpc is done. +/** + * To refresh the context of \req, if it's not up-to-date. + * \param timeout + * - < 0: don't wait + * - = 0: wait until success or fatal error occur + * - > 0: timeout value (in seconds) * - * @timeout: - * < 0 - don't wait - * = 0 - wait until success or fatal error occur - * > 0 - timeout value + * The status of the context could be subject to be changed by other threads + * at any time. We allow this race, but once we return with 0, the caller will + * suppose it's uptodated and keep using it until the owning rpc is done. * - * return 0 only if the context is uptodated. + * \retval 0 only if the context is uptodated. 
+ * \retval -ev error number. */ int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout) { @@ -634,7 +653,7 @@ int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout) /* * during the process a request's context might change type even - * (e.g. from gss ctx to plain ctx), so each loop we need to re-check + * (e.g. from gss ctx to null ctx), so each loop we need to re-check * everything */ again: @@ -674,24 +693,27 @@ again: RETURN(-EPERM); } - /* This is subtle. For resent message we have to keep original - * context to survive following situation: - * 1. the request sent to server - * 2. recovery was kick start - * 3. recovery finished, the request marked as resent - * 4. resend the request - * 5. old reply from server received (because xid is the same) - * 6. verify reply (has to be success) - * 7. new reply from server received, lnet drop it + /* + * There's a subtle issue for resending RPCs, suppose following + * situation: + * 1. the request was sent to server. + * 2. recovery was kicked start, after finished the request was + * marked as resent. + * 3. resend the request. + * 4. old reply from server received, we accept and verify the reply. + * this has to be success, otherwise the error will be aware + * by application. + * 5. new reply from server received, dropped by LNet. * - * Note we can't simply change xid for resent request because - * server reply on it for reply reconstruction. + * Note the xid of old & new request is the same. We can't simply + * change xid for the resent request because the server replies on + * it for reply reconstruction. * * Commonly the original context should be uptodate because we - * have a expiry nice time; And server will keep their half part - * context because we at least hold a ref of old context which - * prevent the context destroy RPC be sent. So server still can - * accept the request and finish RPC. Two cases: + * have a expiry nice time; server will keep its context because + * we at least hold a ref of old context which prevent context + * destroying RPC being sent. So server still can accept the request + * and finish the RPC. But if that's not the case: * 1. If server side context has been trimmed, a NO_CONTEXT will * be returned, gss_cli_ctx_verify/unseal will switch to new * context by force. @@ -732,7 +754,8 @@ again: goto again; } - /* Now we're sure this context is during upcall, add myself into + /* + * Now we're sure this context is during upcall, add myself into * waiting list */ cfs_spin_lock(&ctx->cc_lock); @@ -756,9 +779,10 @@ again: ctx_refresh_interrupt, req); rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi); - /* following cases we could be here: + /* + * following cases could lead us here: * - successfully refreshed; - * - interruptted; + * - interrupted; * - timedout, and we don't want recover from the failure; * - timedout, and waked up upon recovery finished; * - someone else mark this ctx dead by force; @@ -776,8 +800,10 @@ again: goto again; } -/* - * Note this could be called in two situations: +/** + * Initialize flavor settings for \a req, according to \a opcode. + * + * \note this could be called in two situations: * - new request from ptlrpc_pre_req(), with proper @opcode * - old request which changed ctx in the middle, with @opcode == 0 */ @@ -855,10 +881,10 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req) req->rq_reqbuf_len = 0; } -/* - * check whether current user have valid context for an import or not. 
- * might repeatedly try in case of non-fatal errors. - * return 0 on success, < 0 on failure +/** + * Given an import \a imp, check whether current user has a valid context + * or not. We may create a new context and try to refresh it, and try + * repeatedly try in case of non-fatal errors. Return 0 means success. */ int sptlrpc_import_check_ctx(struct obd_import *imp) { @@ -909,6 +935,11 @@ int sptlrpc_import_check_ctx(struct obd_import *imp) RETURN(rc); } +/** + * Used by ptlrpc client, to perform the pre-defined security transformation + * upon the request message of \a req. After this function called, + * req->rq_reqmsg is still accessible as clear text. + */ int sptlrpc_cli_wrap_request(struct ptlrpc_request *req) { struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; @@ -1013,9 +1044,13 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) RETURN(rc); } -/* - * upon this be called, the reply buffer should have been un-posted, - * so nothing is going to change. +/** + * Used by ptlrpc client, to perform security transformation upon the reply + * message of \a req. After return successfully, req->rq_repmsg points to + * the reply message in clear text. + * + * \pre the reply buffer should have been un-posted from LNet, so nothing is + * going to change. */ int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) { @@ -1043,14 +1078,19 @@ int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) } /** - * Upon called, the receive buffer might be still posted, so the reply data - * might be changed at any time, no matter we're holding rq_lock or not. we - * expect the rq_reply_off be 0, rq_nob_received is the early reply size. - * - * we allocate separate ptlrpc_request and reply buffer for early reply - * processing, return 0 and \a req_ret is a duplicated ptlrpc_request. caller - * must call sptlrpc_cli_finish_early_reply() on the returned request to - * release it. if anything goes wrong \a req_ret will not be set. + * Used by ptlrpc client, to perform security transformation upon the early + * reply message of \a req. We expect the rq_reply_off is 0, and + * rq_nob_received is the early reply size. + * + * Because the receive buffer might be still posted, the reply data might be + * changed at any time, no matter we're holding rq_lock or not. For this reason + * we allocate a separate ptlrpc_request and reply buffer for early reply + * processing. + * + * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request. + * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned + * \a *req_ret to release it. + * \retval -ev error number, and \a req_ret will not be set. */ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, struct ptlrpc_request **req_ret) @@ -1137,6 +1177,11 @@ err_req: RETURN(rc); } +/** + * Used by ptlrpc client, to release a processed early reply \a early_req. + * + * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply(). + */ void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req) { LASSERT(early_req->rq_repbuf); @@ -1351,9 +1396,12 @@ static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp, cfs_spin_unlock(&sec->ps_lock); } -/* - * for normal import, @svc_ctx should be NULL and @flvr is ignored; - * for reverse import, @svc_ctx and @flvr is from incoming request. +/** + * To get an appropriate ptlrpc_sec for the \a imp, according to the current + * configuration. Upon called, imp->imp_sec may or may not be NULL. 
+ * + * - regular import: \a svc_ctx should be NULL and \a flvr is ignored; + * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request. */ int sptlrpc_import_sec_adapt(struct obd_import *imp, struct ptlrpc_svc_ctx *svc_ctx, @@ -1491,9 +1539,9 @@ void sptlrpc_import_flush_all_ctx(struct obd_import *imp) } EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx); -/* - * when complete successfully, req->rq_reqmsg should point to the - * right place. +/** + * Used by ptlrpc client to allocate request buffer of \a req. Upon return + * successfully, req->rq_reqmsg points to a buffer with size \a msgsize. */ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) { @@ -1521,6 +1569,10 @@ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) return rc; } +/** + * Used by ptlrpc client to free request buffer of \a req. After this + * req->rq_reqmsg is set to NULL and should not be accessed anymore. + */ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) { struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; @@ -1536,6 +1588,7 @@ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) policy = ctx->cc_sec->ps_policy; policy->sp_cops->free_reqbuf(ctx->cc_sec, req); + req->rq_reqmsg = NULL; } /* @@ -1582,13 +1635,14 @@ void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg, } EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace); -/* - * enlarge @segment of upper message req->rq_reqmsg to @newsize, all data - * will be preserved after enlargement. this must be called after rq_reqmsg has - * been intialized at least. +/** + * Used by ptlrpc client to enlarge the \a segment of request message pointed + * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be + * preserved after the enlargement. this must be called after original request + * buffer being allocated. * - * caller's attention: upon return, rq_reqmsg and rq_reqlen might have - * been changed. + * \note after this be called, rq_reqmsg and rq_reqlen might have been changed, + * so caller should refresh its local pointers if needed. */ int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req, int segment, int newsize) @@ -1611,6 +1665,11 @@ int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req, } EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf); +/** + * Used by ptlrpc client to allocate reply buffer of \a req. + * + * \note After this, req->rq_repmsg is still not accessible. + */ int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize) { struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; @@ -1629,6 +1688,10 @@ int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize) RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize)); } +/** + * Used by ptlrpc client to free reply buffer of \a req. After this + * req->rq_repmsg is set to NULL and should not be accessed anymore. + */ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) { struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; @@ -1646,6 +1709,7 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) policy = ctx->cc_sec->ps_policy; policy->sp_cops->free_repbuf(ctx->cc_sec, req); + req->rq_repmsg = NULL; EXIT; } @@ -1692,6 +1756,11 @@ static int flavor_allowed(struct sptlrpc_flavor *exp, #define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10) +/** + * Given an export \a exp, check whether the flavor of incoming \a req + * is allowed by the export \a exp. Main logic is about taking care of + * changing configurations. Return 0 means success. 
+ */ int sptlrpc_target_export_check(struct obd_export *exp, struct ptlrpc_request *req) { @@ -1952,6 +2021,17 @@ static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc) return svc_rc; } +/** + * Used by ptlrpc server, to perform transformation upon request message of + * incoming \a req. This must be the first thing to do with a incoming + * request in ptlrpc layer. + * + * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in + * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set. + * \retval SECSVC_COMPLETE success, the request has been fully processed, and + * reply message has been prepared. + * \retval SECSVC_DROP failed, this request should be dropped. + */ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) { struct ptlrpc_sec_policy *policy; @@ -2007,8 +2087,12 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) RETURN(rc); } -int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, - int msglen) +/** + * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed, + * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to + * a buffer of \a msglen size. + */ +int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen) { struct ptlrpc_sec_policy *policy; struct ptlrpc_reply_state *rs; @@ -2042,6 +2126,12 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, RETURN(rc); } +/** + * Used by ptlrpc server, to perform transformation upon reply message. + * + * \post req->rq_reply_off is set to approriate server-controlled reply offset. + * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible. + */ int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req) { struct ptlrpc_sec_policy *policy; @@ -2060,6 +2150,9 @@ int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req) RETURN(rc); } +/** + * Used by ptlrpc server, to free reply_state. + */ void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs) { struct ptlrpc_sec_policy *policy; @@ -2123,6 +2216,10 @@ EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate); * bulk security * ****************************************/ +/** + * Perform transformation upon bulk data pointed by \a desc. This is called + * before transforming the request message. + */ int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { @@ -2140,7 +2237,8 @@ int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, } EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk); -/* +/** + * This is called after unwrap the reply message. * return nob of actual plain text size received, or error code. */ int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req, @@ -2165,7 +2263,8 @@ int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req, } EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read); -/* +/** + * This is called after unwrap the reply message. * return 0 for success or error code. */ int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, @@ -2200,6 +2299,9 @@ int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, } EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write); +/** + * Performe transformation upon outgoing bulk read. + */ int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { @@ -2218,6 +2320,9 @@ int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req, } EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk); +/** + * Performe transformation upon incoming bulk write. 
+ */ int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { @@ -2254,6 +2359,9 @@ int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req, } EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk); +/** + * Prepare buffers for incoming bulk write. + */ int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { diff --git a/lustre/ptlrpc/sec_null.c b/lustre/ptlrpc/sec_null.c index 34a9284..dd4332b 100644 --- a/lustre/ptlrpc/sec_null.c +++ b/lustre/ptlrpc/sec_null.c @@ -201,11 +201,9 @@ void null_free_reqbuf(struct ptlrpc_sec *sec, req, req->rq_reqlen, req->rq_reqbuf_len); OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len); - req->rq_reqmsg = req->rq_reqbuf = NULL; + req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } - - req->rq_reqmsg = NULL; } static @@ -235,8 +233,6 @@ void null_free_repbuf(struct ptlrpc_sec *sec, OBD_FREE(req->rq_repbuf, req->rq_repbuf_len); req->rq_repbuf = NULL; req->rq_repbuf_len = 0; - - req->rq_repmsg = NULL; } static diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c index e2866a1..0e2bd16 100644 --- a/lustre/ptlrpc/sec_plain.c +++ b/lustre/ptlrpc/sec_plain.c @@ -621,8 +621,6 @@ void plain_free_reqbuf(struct ptlrpc_sec *sec, req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } - - req->rq_reqmsg = NULL; EXIT; } @@ -666,8 +664,6 @@ void plain_free_repbuf(struct ptlrpc_sec *sec, OBD_FREE(req->rq_repbuf, req->rq_repbuf_len); req->rq_repbuf = NULL; req->rq_repbuf_len = 0; - - req->rq_repmsg = NULL; EXIT; }
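[Editor's note on the sec.c / sec_null.c / sec_plain.c / gss hunks above.] Besides adding comments, the patch moves the clearing of req->rq_reqmsg and req->rq_repmsg out of the per-policy free_reqbuf/free_repbuf hooks and into the generic sptlrpc_cli_free_reqbuf() / sptlrpc_cli_free_repbuf() wrappers, matching the pre/post conditions documented above. A minimal user-space sketch of that buffer-lifecycle contract -- toy_request and the toy_* helpers are invented for illustration and mirror only the handful of fields the new comments mention; in the simplest (null-policy) case the clear-text message lives directly in the wire buffer:

#include <assert.h>
#include <stdlib.h>

/* Toy model only -- this is NOT struct ptlrpc_request, just the few
 * fields the alloc/free contracts documented above talk about. */
struct toy_request {
        void *rq_reqmsg;        /* clear-text request message */
        void *rq_reqbuf;        /* wire request buffer */
        int   rq_reqbuf_len;
};

/* mirrors the documented contract of sptlrpc_cli_alloc_reqbuf():
 * pre  rq_reqmsg == NULL; post rq_reqmsg points to >= msgsize bytes */
static int toy_alloc_reqbuf(struct toy_request *req, int msgsize)
{
        assert(req->rq_reqmsg == NULL);
        req->rq_reqbuf = malloc(msgsize);
        if (req->rq_reqbuf == NULL)
                return -1;
        req->rq_reqbuf_len = msgsize;
        /* null-policy style: message sits directly in the wire buffer */
        req->rq_reqmsg = req->rq_reqbuf;
        return 0;
}

/* mirrors sptlrpc_cli_free_reqbuf(): after this call rq_reqmsg must not
 * be dereferenced -- the generic layer now clears it, per this patch */
static void toy_free_reqbuf(struct toy_request *req)
{
        free(req->rq_reqbuf);
        req->rq_reqbuf = NULL;
        req->rq_reqbuf_len = 0;
        req->rq_reqmsg = NULL;
}

int main(void)
{
        struct toy_request req = { NULL, NULL, 0 };

        if (toy_alloc_reqbuf(&req, 1024) == 0) {
                assert(req.rq_reqmsg != NULL);
                toy_free_reqbuf(&req);
                assert(req.rq_reqmsg == NULL);
        }
        return 0;
}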