X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre_sec.h;h=6c4207cb4970f86212f52a9bacb9b7c3deda263e;hp=50274fc440a67a9635bf6bef4cf49486f18fdf3d;hb=68234a15f0ca73b035f91e5bcc9399ca0bca1287;hpb=744f32ac9efb1e2f2837992703c5a5e35f261e60

diff --git a/lustre/include/lustre_sec.h b/lustre/include/lustre_sec.h
index 50274fc..6c4207c 100644
--- a/lustre/include/lustre_sec.h
+++ b/lustre/include/lustre_sec.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,7 +24,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -37,16 +35,22 @@
 #ifndef _LUSTRE_SEC_H_
 #define _LUSTRE_SEC_H_
 
+/** \defgroup sptlrpc sptlrpc
+ *
+ * @{
+ */
+
 /*
  * to avoid include
  */
-struct key;
 struct obd_import;
 struct obd_export;
 struct ptlrpc_request;
 struct ptlrpc_reply_state;
 struct ptlrpc_bulk_desc;
 struct brw_page;
+/* Linux specific */
+struct key;
 struct seq_file;
 
 /*
@@ -60,6 +64,20 @@ struct ptlrpc_svc_ctx;
 struct ptlrpc_cli_ctx;
 struct ptlrpc_ctx_ops;
 
+/**
+ * \addtogroup flavor flavor
+ *
+ * RPC flavor is represented by a 32-bit integer. Currently the high 12 bits
+ * are unused and must be set to 0 for future expansion.
+ * <pre>
+ * ------------------------------------------------------------------------
+ * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech)  | 4b (policy) |
+ * ------------------------------------------------------------------------
+ * </pre>
+ *
+ * @{
+ */
+
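The diagram above is the key to the FLVR_* accessor macros that follow (FLVR_POLICY_OFFSET and FLVR_MECH_OFFSET below give the first two bit positions). As a rough sketch of how one field would be pulled out of a wire flavor — the helper is hypothetical and only illustrates the documented layout; the header's own macros should be used in real code:

    /* assumed offsets, following the diagram: policy = 0, mech = 4,
     * svc = 8, bulk type = 12, bulk svc = 16; bits 20..31 must be 0 */
    static inline __u32 demo_flvr_field(__u32 wire_flavor, int offset)
    {
            return (wire_flavor >> offset) & 0xF;  /* each field is 4 bits wide */
    }

    /* e.g. demo_flvr_field(flvr, 8) yields an enum sptlrpc_service_type value */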
 /*
  * flavor constants
  */
@@ -87,32 +105,29 @@ enum sptlrpc_mech_gss {
 };
 
 enum sptlrpc_service_type {
-        SPTLRPC_SVC_NULL        = 0,    /* no security */
-        SPTLRPC_SVC_AUTH        = 1,    /* auth only */
-        SPTLRPC_SVC_INTG        = 2,    /* integrity */
-        SPTLRPC_SVC_PRIV        = 3,    /* privacy */
+        SPTLRPC_SVC_NULL        = 0,    /**< no security */
+        SPTLRPC_SVC_AUTH        = 1,    /**< authentication only */
+        SPTLRPC_SVC_INTG        = 2,    /**< integrity */
+        SPTLRPC_SVC_PRIV        = 3,    /**< privacy */
         SPTLRPC_SVC_MAX,
 };
 
 enum sptlrpc_bulk_type {
-        SPTLRPC_BULK_DEFAULT    = 0,    /* follow rpc flavor */
-        SPTLRPC_BULK_HASH       = 1,    /* hash integrity */
+        SPTLRPC_BULK_DEFAULT    = 0,    /**< follow rpc flavor */
+        SPTLRPC_BULK_HASH       = 1,    /**< hash integrity */
         SPTLRPC_BULK_MAX,
 };
 
 enum sptlrpc_bulk_service {
-        SPTLRPC_BULK_SVC_NULL   = 0,
-        SPTLRPC_BULK_SVC_AUTH   = 1,
-        SPTLRPC_BULK_SVC_INTG   = 2,
-        SPTLRPC_BULK_SVC_PRIV   = 3,
+        SPTLRPC_BULK_SVC_NULL   = 0,    /**< no security */
+        SPTLRPC_BULK_SVC_AUTH   = 1,    /**< authentication only */
+        SPTLRPC_BULK_SVC_INTG   = 2,    /**< integrity */
+        SPTLRPC_BULK_SVC_PRIV   = 3,    /**< privacy */
         SPTLRPC_BULK_SVC_MAX,
 };
 
 /*
- * rpc flavor compose/extract, represented as 32 bits. currently the
- * high 12 bits are unused, must be set as 0.
- *
- * 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy)
+ * compose/extract macros
  */
 #define FLVR_POLICY_OFFSET      (0)
 #define FLVR_MECH_OFFSET        (4)
@@ -207,11 +222,13 @@ enum sptlrpc_bulk_service {
 #define SPTLRPC_FLVR_INVALID    ((__u32) 0xFFFFFFFF)
 #define SPTLRPC_FLVR_ANY        ((__u32) 0xFFF00000)
 
-/*
+/**
  * extract the useful part from wire flavor
  */
 #define WIRE_FLVR(wflvr)        (((__u32) (wflvr)) & 0x000FFFFF)
 
+/** @} flavor */
+
 static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
 {
         LASSERT(svc < SPTLRPC_SVC_MAX);
@@ -236,16 +253,26 @@ struct bulk_spec_hash {
         __u8    hash_alg;
 };
 
+/**
+ * Full description of the flavors being used on a ptlrpc connection,
+ * including both the regular RPC and the bulk transfer parts.
+ */
 struct sptlrpc_flavor {
-        __u32   sf_rpc;         /* wire flavor - should be renamed to sf_wire */
-        __u32   sf_flags;       /* general flags */
-        /*
+        /**
+         * wire flavor, should be renamed to sf_wire.
+         */
+        __u32   sf_rpc;
+        /**
+         * general flags of PTLRPC_SEC_FL_*
+         */
+        __u32   sf_flags;
+        /**
          * rpc flavor specification
          */
         union {
                 /* nothing for now */
         } u_rpc;
-        /*
+        /**
         * bulk flavor specification
         */
        union {
@@ -253,6 +280,10 @@ struct sptlrpc_flavor {
         } u_bulk;
 };
 
+/**
+ * Identify which part of Lustre the RPC is generated from. It's encoded
+ * into RPC requests and checked by the ptlrpc service.
+ */
 enum lustre_sec_part {
         LUSTRE_SP_CLI           = 0,
         LUSTRE_SP_MDT,
@@ -265,6 +296,10 @@
 const char *sptlrpc_part2name(enum lustre_sec_part sp);
 enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
 
+/**
+ * A rule specifies a flavor to be used by a ptlrpc connection between
+ * two Lustre parts.
+ */
 struct sptlrpc_rule {
         __u32                   sr_netid;   /* LNET network ID */
         __u8                    sr_from;    /* sec_part */
@@ -273,6 +308,12 @@
         struct sptlrpc_flavor   sr_flvr;
 };
 
+/**
+ * A set of rules in memory.
+ *
+ * Rules are generated and stored on MGS, and propagated to MDT, OST,
+ * and client when needed.
+ */
 struct sptlrpc_rule_set {
         int                     srs_nslot;
         int                     srs_nrule;
@@ -325,31 +366,119 @@ struct vfs_cred {
 };
 
 struct ptlrpc_ctx_ops {
+        /**
+         * To determine whether it's suitable to use the \a ctx for \a vcred.
+         */
         int     (*match)       (struct ptlrpc_cli_ctx *ctx,
                                 struct vfs_cred *vcred);
+
+        /**
+         * To bring the \a ctx up to date.
+         */
         int     (*refresh)     (struct ptlrpc_cli_ctx *ctx);
+
+        /**
+         * Validate the \a ctx.
+         */
         int     (*validate)    (struct ptlrpc_cli_ctx *ctx);
+
+        /**
+         * Force the \a ctx to die.
+         */
         void    (*die)         (struct ptlrpc_cli_ctx *ctx, int grace);
         int     (*display)     (struct ptlrpc_cli_ctx *ctx,
                                 char *buf, int bufsize);
-        /*
-         * rpc data transform
+
+        /**
+         * Sign the request message using \a ctx.
+         *
+         * \pre req->rq_reqmsg points to the request message.
+         * \pre req->rq_reqlen is the request message length.
+         * \post req->rq_reqbuf points to the request message with signature.
+         * \post req->rq_reqdata_len is set to the final request message size.
+         *
+         * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
          */
         int     (*sign)        (struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req);
+
+        /**
+         * Verify the reply message using \a ctx.
+         *
+         * \pre req->rq_repdata points to the reply message with signature.
+         * \pre req->rq_repdata_len is the total reply message length.
+         * \post req->rq_repmsg points to the reply message without signature.
+         * \post req->rq_replen is the reply message length.
+         *
+         * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
+         */
         int     (*verify)      (struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req);
+
+        /**
+         * Encrypt the request message using \a ctx.
+         *
+         * \pre req->rq_reqmsg points to the request message in clear text.
+         * \pre req->rq_reqlen is the request message length.
+         * \post req->rq_reqbuf points to the request message.
+         * \post req->rq_reqdata_len is set to the final request message size.
+         *
+         * \see gss_cli_ctx_seal().
         */
         int     (*seal)        (struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req);
+
+        /**
+         * Decrypt the reply message using \a ctx.
+         *
+         * \pre req->rq_repdata points to the encrypted reply message.
+         * \pre req->rq_repdata_len is the total cipher text length.
+         * \post req->rq_repmsg points to the reply message in clear text.
+         * \post req->rq_replen is the reply message length in clear text.
+         *
+         * \see gss_cli_ctx_unseal().
+         */
         int     (*unseal)      (struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req);
-        /*
-         * bulk transform
+
+        /**
+         * Wrap bulk request data. This is called before wrapping the RPC
+         * request message.
+         *
+         * \pre the bulk buffer is described by desc->bd_iov and
+         * desc->bd_iov_count. Note that for a read it's just a buffer and no
+         * data needs to be sent; for a write it contains data in clear text.
+         * \post when necessary, ptlrpc_bulk_sec_desc is properly prepared
+         * (usually inside of the RPC request message).
+         * - encryption: the cipher text bulk buffer is described by
+         *   desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
+         *   count remains the same).
+         * - otherwise: the bulk buffer is still desc->bd_iov and
+         *   desc->bd_iov_count.
+         *
+         * \return 0: success.
+         * \return -ve: error code.
+         *
+         * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
         */
         int     (*wrap_bulk)   (struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req,
                                 struct ptlrpc_bulk_desc *desc);
+
+        /**
+         * Unwrap bulk reply data. This is called after the RPC reply message
+         * has been unwrapped.
+         *
+         * \pre the bulk buffer is described by desc->bd_iov/desc->bd_enc_iov
+         * and desc->bd_iov_count, according to wrap_bulk().
+         * \post the final bulk data in clear text is placed in the buffer
+         * described by desc->bd_iov and desc->bd_iov_count.
+         * \return +ve nob of actual bulk data in clear text.
+         * \return -ve error code.
+         *
+         * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
+         */
         int     (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req,
                                 struct ptlrpc_bulk_desc *desc);
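The sign/seal and verify/unseal pairs above are alternatives selected by the service level of the flavor in use: signing covers the NULL/AUTH/INTG levels, sealing covers PRIV. A minimal sketch of that dispatch, assuming a caller that already knows the wire flavor (the helper name and the open-coded field extraction are illustrative only; the sptlrpc client code performs this internally):

    static int demo_cli_wrap_request(struct ptlrpc_cli_ctx *ctx,
                                     struct ptlrpc_request *req,
                                     __u32 wire_flavor)
    {
            /* the svc field sits at bit 8 of the flavor, per the layout above */
            __u32 svc = (wire_flavor >> 8) & 0xF;

            /* rq_reqmsg/rq_reqlen must already describe the clear-text request;
             * on success rq_reqbuf/rq_reqdata_len hold the wire form */
            if (svc == SPTLRPC_SVC_PRIV)
                    return ctx->cc_ops->seal(ctx, req);
            return ctx->cc_ops->sign(ctx, req);
    }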
@@ -375,70 +504,178 @@ struct ptlrpc_ctx_ops {
                                  PTLRPC_CTX_ERROR)
 
 struct ptlrpc_cli_ctx {
-        struct hlist_node       cc_cache;      /* linked into ctx cache */
-        atomic_t                cc_refcount;
+        cfs_hlist_node_t        cc_cache;      /* linked into ctx cache */
+        cfs_atomic_t            cc_refcount;
         struct ptlrpc_sec      *cc_sec;
         struct ptlrpc_ctx_ops  *cc_ops;
         cfs_time_t              cc_expire;     /* in seconds */
         unsigned int            cc_early_expire:1;
         unsigned long           cc_flags;
         struct vfs_cred         cc_vcred;
-        spinlock_t              cc_lock;
-        struct list_head        cc_req_list;   /* waiting reqs linked here */
-        struct list_head        cc_gc_chain;   /* linked to gc chain */
+        cfs_spinlock_t          cc_lock;
+        cfs_list_t              cc_req_list;   /* waiting reqs linked here */
+        cfs_list_t              cc_gc_chain;   /* linked to gc chain */
 };
 
+/**
+ * client side policy operation vector.
+ */
 struct ptlrpc_sec_cops {
-        /*
-         * ptlrpc_sec constructor/destructor
+        /**
+         * Given an \a imp, create and initialize a ptlrpc_sec structure.
+         * \param ctx service context:
+         * - regular import: \a ctx should be NULL;
+         * - reverse import: \a ctx is obtained from an incoming request.
+         * \param flavor specify what flavor to use.
+         *
+         * When necessary, the policy module is responsible for taking a
+         * reference on the import.
+         *
+         * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
          */
         struct ptlrpc_sec *     (*create_sec)  (struct obd_import *imp,
                                                 struct ptlrpc_svc_ctx *ctx,
                                                 struct sptlrpc_flavor *flavor);
+
+        /**
+         * Destructor of ptlrpc_sec. When called, refcount has been dropped
+         * to 0 and all contexts have been destroyed.
+         *
+         * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
+         */
         void                    (*destroy_sec) (struct ptlrpc_sec *sec);
-        /*
-         * notify to-be-dead
+
+        /**
+         * Notify that this ptlrpc_sec is going to die. Optionally, the policy
+         * module is supposed to set sec->ps_dying and take whatever actions
+         * are necessary.
+         *
+         * \see plain_kill_sec(), gss_sec_kill().
         */
         void                    (*kill_sec)    (struct ptlrpc_sec *sec);
-        /*
-         * context
+
+        /**
+         * Given \a vcred, lookup and/or create its context. The policy module
+         * is supposed to maintain its own context cache.
+         * XXX currently \a create and \a remove_dead are always 1, perhaps
+         * they should be removed completely.
+         *
+         * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
         */
         struct ptlrpc_cli_ctx * (*lookup_ctx)  (struct ptlrpc_sec *sec,
                                                 struct vfs_cred *vcred,
                                                 int create, int remove_dead);
+
+        /**
+         * Called when the reference count of \a ctx drops to 0. The policy
+         * module is supposed to destroy this context or whatever else
+         * according to its cache maintenance mechanism.
+         *
+         * \param sync if zero, we shouldn't wait for the context being
+         * destroyed completely.
+         *
+         * \see plain_release_ctx(), gss_sec_release_ctx_kr().
+         */
         void                    (*release_ctx) (struct ptlrpc_sec *sec,
                                                 struct ptlrpc_cli_ctx *ctx,
                                                 int sync);
+
+        /**
+         * Flush the context cache.
+         *
+         * \param uid user whose contexts are flushed; -1 means all contexts.
+         * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
+         * contexts should be cleared immediately.
+         * \param force if zero, only idle contexts will be flushed.
+         *
+         * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
+         */
         int                     (*flush_ctx_cache)
                                                (struct ptlrpc_sec *sec,
                                                 uid_t uid,
                                                 int grace, int force);
+
+        /**
+         * Called periodically by the garbage collector to remove dead
+         * contexts from the cache.
+         *
+         * \see gss_sec_gc_ctx_kr().
+         */
         void                    (*gc_ctx)      (struct ptlrpc_sec *sec);
-        /*
-         * reverse context
+
+        /**
+         * Given a context \a ctx, install a corresponding reverse service
+         * context on the client side.
+         * XXX currently it's only used by the GSS module, maybe we should
+         * remove this from the general API.
         */
         int                     (*install_rctx)(struct obd_import *imp,
                                                 struct ptlrpc_sec *sec,
                                                 struct ptlrpc_cli_ctx *ctx);
-        /*
-         * request/reply buffer manipulation
+
+        /**
+         * To allocate a request buffer for \a req.
+         *
+         * \pre req->rq_reqmsg == NULL.
+         * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated and
+         * we are not supposed to free it.
+         * \post on success, req->rq_reqmsg points to a buffer with size
+         * at least \a lustre_msg_size.
+         *
+         * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
         */
         int                     (*alloc_reqbuf)(struct ptlrpc_sec *sec,
                                                 struct ptlrpc_request *req,
                                                 int lustre_msg_size);
+
+        /**
+         * To free the request buffer of \a req.
+         *
+         * \pre req->rq_reqbuf != NULL.
+         *
+         * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
+         */
         void                    (*free_reqbuf) (struct ptlrpc_sec *sec,
                                                 struct ptlrpc_request *req);
+
+        /**
+         * To allocate a reply buffer for \a req.
+         *
+         * \pre req->rq_repbuf == NULL.
+         * \post on success, req->rq_repbuf points to a buffer of size
+         * req->rq_repbuf_len; the size should be large enough to receive a
+         * reply transformed from \a lustre_msg_size bytes of clear text.
+         *
+         * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
+         */
         int                     (*alloc_repbuf)(struct ptlrpc_sec *sec,
                                                 struct ptlrpc_request *req,
                                                 int lustre_msg_size);
+
+        /**
+         * To free the reply buffer of \a req.
+         *
+         * \pre req->rq_repbuf != NULL.
+         * \post req->rq_repbuf == NULL.
+         * \post req->rq_repbuf_len == 0.
+         *
+         * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
+         */
         void                    (*free_repbuf) (struct ptlrpc_sec *sec,
                                                 struct ptlrpc_request *req);
+
+        /**
+         * To expand the request buffer of \a req, so that the \a segment in
+         * the request message pointed to by req->rq_reqmsg can accommodate
+         * at least \a newsize bytes of data.
+         *
+         * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
+         *
+         * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
+         * gss_enlarge_reqbuf().
+         */
         int                     (*enlarge_reqbuf)
                                                (struct ptlrpc_sec *sec,
                                                 struct ptlrpc_request *req,
@@ -450,30 +687,114 @@ struct ptlrpc_sec_cops {
                                                 struct seq_file *seq);
 };
 
+/**
+ * server side policy operation vector.
+ */
 struct ptlrpc_sec_sops {
+        /**
+         * Verify an incoming request.
+         *
+         * \pre the request message is pointed to by req->rq_reqbuf, its size
+         * is req->rq_reqdata_len; and the message has been unpacked to host
+         * byte order.
+         *
+         * \retval SECSVC_OK success, req->rq_reqmsg points to the request
+         * message in clear text, size is req->rq_reqlen; req->rq_svc_ctx is
+         * set; req->rq_sp_from is decoded from the request.
+         * \retval SECSVC_COMPLETE success, the request has been fully
+         * processed and the reply message has been prepared; req->rq_sp_from
+         * is decoded from the request.
+         * \retval SECSVC_DROP failed, this request should be dropped.
+         *
+         * \see null_accept(), plain_accept(), gss_svc_accept_kr().
+         */
         int                     (*accept)      (struct ptlrpc_request *req);
+
+        /**
+         * Perform the security transformation upon the reply message.
+         *
+         * \pre the reply message is pointed to by req->rq_reply_state->rs_msg,
+         * its size is req->rq_replen.
+         * \post req->rs_repdata_len is the final message size.
+         * \post req->rq_reply_off is set.
+         *
+         * \see null_authorize(), plain_authorize(), gss_svc_authorize().
+         */
         int                     (*authorize)   (struct ptlrpc_request *req);
+
+        /**
+         * Invalidate the server context \a ctx.
+         *
+         * \see gss_svc_invalidate_ctx().
+         */
         void                    (*invalidate_ctx)
                                                (struct ptlrpc_svc_ctx *ctx);
-        /* buffer manipulation */
+
+        /**
+         * Allocate a ptlrpc_reply_state.
+         *
+         * \param msgsize size of the reply message in clear text.
+         * \pre if req->rq_reply_state != NULL, then it's pre-allocated and we
+         * should simply use it; otherwise we are responsible for allocating
+         * a new one.
+         * \post req->rq_reply_state != NULL;
+         * \post req->rq_reply_state->rs_msg != NULL;
+         *
+         * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
+         */
         int                     (*alloc_rs)    (struct ptlrpc_request *req,
                                                 int msgsize);
+
+        /**
+         * Free a ptlrpc_reply_state.
+         */
         void                    (*free_rs)     (struct ptlrpc_reply_state *rs);
+
+        /**
+         * Release the server context \a ctx.
+         *
+         * \see gss_svc_free_ctx().
+         */
         void                    (*free_ctx)    (struct ptlrpc_svc_ctx *ctx);
-        /* reverse context */
+
+        /**
+         * Install a reverse context based on the server context \a ctx.
+         *
+         * \see gss_svc_install_rctx_kr().
+         */
         int                     (*install_rctx)(struct obd_import *imp,
                                                 struct ptlrpc_svc_ctx *ctx);
-        /* bulk transform */
+
+        /**
+         * Prepare a buffer for an incoming bulk write.
+         *
+         * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
+         * intended to receive the write.
+         *
+         * \see gss_svc_prep_bulk().
+         */
         int                     (*prep_bulk)   (struct ptlrpc_request *req,
                                                 struct ptlrpc_bulk_desc *desc);
+
+        /**
+         * Unwrap the bulk write data.
+         *
+         * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
+         */
         int                     (*unwrap_bulk) (struct ptlrpc_request *req,
                                                 struct ptlrpc_bulk_desc *desc);
+
+        /**
+         * Wrap the bulk read data.
+         *
+         * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
+         */
         int                     (*wrap_bulk)   (struct ptlrpc_request *req,
                                                 struct ptlrpc_bulk_desc *desc);
 };
 
 struct ptlrpc_sec_policy {
-        struct module          *sp_owner;
+        cfs_module_t           *sp_owner;
         char                   *sp_name;
         __u16                   sp_policy;     /* policy number */
         struct ptlrpc_sec_cops *sp_cops;       /* client ops */
@@ -486,20 +807,32 @@ struct ptlrpc_sec_policy {
 #define PTLRPC_SEC_FL_BULK      0x0008 /* intensive bulk i/o expected */
 #define PTLRPC_SEC_FL_PAG       0x0010 /* PAG mode */
 
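A policy module ties the client and server vectors together through a struct ptlrpc_sec_policy and registers it with the sptlrpc core. A hedged sketch with placeholder names (only the structure fields and the sptlrpc_register_policy()/sptlrpc_unregister_policy() calls declared later in this header are real):

    static struct ptlrpc_sec_cops demo_sec_cops;  /* client ops, filled in elsewhere */
    static struct ptlrpc_sec_sops demo_sec_sops;  /* server ops, filled in elsewhere */

    static struct ptlrpc_sec_policy demo_policy = {
            .sp_owner  = THIS_MODULE,   /* sp_owner is a cfs_module_t * on this branch */
            .sp_name   = "demo",
            .sp_policy = 0,             /* would be one of the SPTLRPC_POLICY_* numbers */
            .sp_cops   = &demo_sec_cops,
            .sp_sops   = &demo_sec_sops,
    };

    static int __init demo_policy_init(void)
    {
            return sptlrpc_register_policy(&demo_policy);
    }

    static void __exit demo_policy_fini(void)
    {
            sptlrpc_unregister_policy(&demo_policy);
    }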
+/**
+ * The ptlrpc_sec represents the client side ptlrpc security facilities;
+ * each obd_import (both regular and reverse import) must be associated
+ * with a ptlrpc_sec.
+ *
+ * \see sptlrpc_import_sec_adapt().
+ */
 struct ptlrpc_sec {
         struct ptlrpc_sec_policy *ps_policy;
-        atomic_t                  ps_refcount;
-        atomic_t                  ps_nctx;       /* statistic only */
-        int                       ps_id;         /* unique identifier */
-        struct sptlrpc_flavor     ps_flvr;       /* flavor */
+        cfs_atomic_t              ps_refcount;
+        /** statistic only */
+        cfs_atomic_t              ps_nctx;
+        /** unique identifier */
+        int                       ps_id;
+        struct sptlrpc_flavor     ps_flvr;
         enum lustre_sec_part      ps_part;
+        /** after set, no more new context will be created */
         unsigned int              ps_dying:1;
-        struct obd_import        *ps_import;     /* owning import */
-        spinlock_t                ps_lock;       /* protect ccache */
+        /** owning import */
+        struct obd_import        *ps_import;
+        cfs_spinlock_t            ps_lock;
+
         /*
          * garbage collection
         */
-        struct list_head          ps_gc_list;
+        cfs_list_t                ps_gc_list;
         cfs_time_t                ps_gc_interval; /* in seconds */
         cfs_time_t                ps_gc_next;     /* in seconds */
 };
@@ -516,7 +849,7 @@ static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
 
 struct ptlrpc_svc_ctx {
-        atomic_t                  sc_refcount;
+        cfs_atomic_t              sc_refcount;
         struct ptlrpc_sec_policy *sc_policy;
 };
 
@@ -550,13 +883,6 @@ enum sptlrpc_bulk_hash_alg {
         BULK_HASH_ALG_MAX
 };
 
-struct sptlrpc_hash_type {
-        char           *sht_name;
-        char           *sht_tfm_name;
-        unsigned int    sht_size;
-};
-
-const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg);
 const char * sptlrpc_get_hash_name(__u8 hash_alg);
 __u8 sptlrpc_get_hash_alg(const char *algname);
 
@@ -603,7 +929,7 @@ void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
                                   int segment, int newsize);
 
 /*
- * security type
+ * security policies
 */
 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
 
@@ -618,14 +944,14 @@ char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
 
 static inline
 struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
 {
-        __module_get(policy->sp_owner);
+        __cfs_module_get(policy->sp_owner);
         return policy;
 }
 
 static inline void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
 {
-        module_put(policy->sp_owner);
+        cfs_module_put(policy->sp_owner);
 }
 
 /*
@@ -721,7 +1047,6 @@
 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
 void sptlrpc_import_sec_put(struct obd_import *imp);
 int  sptlrpc_import_check_ctx(struct obd_import *imp);
-void sptlrpc_import_inval_all_ctx(struct obd_import *imp);
 void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
 void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
 void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
@@ -785,18 +1110,20 @@ int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
                                  int nob);
 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
                                   struct ptlrpc_bulk_desc *desc);
 
+#ifdef HAVE_SERVER_SUPPORT
 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
                           struct ptlrpc_bulk_desc *desc);
 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
                           struct ptlrpc_bulk_desc *desc);
 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
                             struct ptlrpc_bulk_desc *desc);
+#endif
 
 /* bulk helpers (internal use only by policies) */
 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
                               void *buf, int buflen);
-int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset);
+int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
 
 /* user descriptor helpers */
 static inline int sptlrpc_user_desc_size(int ngroups)
 {
@@ -806,7 +1133,7 @@ int sptlrpc_current_user_desc_size(void);
 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
-int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset);
+int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
 
 #define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
@@ -819,4 +1146,6 @@ enum {
         LUSTRE_SEC_ALL = 3
 };
 
+/** @} sptlrpc */
+
 #endif /* _LUSTRE_SEC_H_ */
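On the server side, the accept() contract documented earlier boils down to a three-way result. A hedged sketch of how a caller is expected to branch on it (the dispatcher below is hypothetical; only the SECSVC_* values and the accept() semantics come from this header):

    static int demo_svc_handle_request(struct ptlrpc_request *req,
                                       struct ptlrpc_sec_sops *sops)
    {
            switch (sops->accept(req)) {
            case SECSVC_OK:        /* rq_reqmsg now points at the clear-text request */
                    return 0;      /* continue with normal request processing */
            case SECSVC_COMPLETE:  /* reply has already been prepared, just send it */
                    return 1;
            case SECSVC_DROP:      /* verification failed, drop the request */
            default:
                    return -1;
            }
    }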