struct lustre_handle imp_remote_handle;
cfs_time_t imp_next_ping; /* jiffies */
__u64 imp_last_success_conn; /* jiffies, 64-bit */
- cfs_time_t imp_next_reconnect; /* seconds */
/* all available obd_import_conn linked here */
struct list_head imp_conn_list;
rq_auth_gss:1, /* authenticated by gss */
rq_auth_remote:1, /* authed as remote user */
rq_auth_usr_root:1, /* authed as root */
- rq_auth_usr_mdt:1; /* authed as mdt */
+ rq_auth_usr_mdt:1, /* authed as mdt */
+ /* doesn't expect reply FIXME */
+ rq_no_reply:1;
uid_t rq_auth_uid; /* authed uid */
uid_t rq_auth_mapped_uid; /* authed uid mapped to */
*/
struct key;
struct obd_import;
+struct obd_export;
struct ptlrpc_request;
struct ptlrpc_reply_state;
struct ptlrpc_bulk_desc;
/*
* flavor constants
*/
-enum sptlrpc_policies {
+enum sptlrpc_policy {
SPTLRPC_POLICY_NULL = 0,
SPTLRPC_POLICY_PLAIN = 1,
SPTLRPC_POLICY_GSS = 2,
SPTLRPC_POLICY_MAX,
};
-enum sptlrpc_subpolicy_null {
- SPTLRPC_SUBPOLICY_NULL = 0,
- SPTLRPC_SUBPOLICY_NULL_MAX,
+enum sptlrpc_mech_null {
+ SPTLRPC_MECH_NULL = 0,
+ SPTLRPC_MECH_NULL_MAX,
};
-enum sptlrpc_subpolicy_plain {
- SPTLRPC_SUBPOLICY_PLAIN = 0,
- SPTLRPC_SUBPOLICY_PLAIN_MAX,
+enum sptlrpc_mech_plain {
+ SPTLRPC_MECH_PLAIN = 0,
+ SPTLRPC_MECH_PLAIN_MAX,
};
-enum sptlrpc_subpolicy_gss {
- SPTLRPC_SUBPOLICY_GSS_NONE = 0,
- SPTLRPC_SUBPOLICY_GSS_KRB5 = 1,
- SPTLRPC_SUBPOLICY_GSS_MAX,
+enum sptlrpc_mech_gss {
+ SPTLRPC_MECH_GSS_NULL = 0,
+ SPTLRPC_MECH_GSS_KRB5 = 1,
+ SPTLRPC_MECH_GSS_MAX,
};
enum sptlrpc_service_type {
- SPTLRPC_SVC_NONE = 0, /* no security */
- SPTLRPC_SVC_AUTH = 1, /* authentication */
- SPTLRPC_SVC_PRIV = 2, /* privacy */
+ SPTLRPC_SVC_NULL = 0, /* no security */
+ SPTLRPC_SVC_AUTH = 1, /* auth only */
+ SPTLRPC_SVC_INTG = 2, /* integrity */
+ SPTLRPC_SVC_PRIV = 3, /* privacy */
SPTLRPC_SVC_MAX,
};
typedef __u32 ptlrpc_sec_flavor_t;
/*
- * 8b (reserved) | 8b (flags) | 6b (policy) | 6b (subpolicy) | 4b (svc)
+ * 8b (reserved) | 8b (flags)
+ * 4b (reserved) | 4b (svc) | 4b (mech) | 4b (policy)
*/
+#define SEC_FLAVOR_POLICY_OFFSET (0)
+#define SEC_FLAVOR_MECH_OFFSET (4)
+#define SEC_FLAVOR_SVC_OFFSET (8)
+#define SEC_FLAVOR_RESERVE1_OFFSET (12)
#define SEC_FLAVOR_FLAGS_OFFSET (16)
-#define SEC_FLAVOR_POLICY_OFFSET (10)
-#define SEC_FLAVOR_SUBPOLICY_OFFSET (4)
-#define SEC_FLAVOR_SVC_OFFSET (0)
-#define SEC_MAKE_RPC_FLAVOR(policy, subpolicy, svc) \
+#define SEC_MAKE_RPC_FLAVOR(policy, mech, svc) \
(((__u32)(policy) << SEC_FLAVOR_POLICY_OFFSET) | \
- ((__u32)(subpolicy) << SEC_FLAVOR_SUBPOLICY_OFFSET) | \
+ ((__u32)(mech) << SEC_FLAVOR_MECH_OFFSET) | \
((__u32)(svc) << SEC_FLAVOR_SVC_OFFSET))
-#define SEC_MAKE_RPC_SUBFLAVOR(subpolicy, svc) \
- (((__u32)(subpolicy) << SEC_FLAVOR_SUBPOLICY_OFFSET) | \
- ((__u32)(svc) << SEC_FLAVOR_SVC_OFFSET))
+#define SEC_MAKE_RPC_SUBFLAVOR(mech, svc) \
+ ((__u32)(mech) | \
+ ((__u32)(svc) << \
+ (SEC_FLAVOR_SVC_OFFSET - SEC_FLAVOR_MECH_OFFSET)))
+
+#define SEC_FLAVOR_SUB(flavor) \
+ ((((__u32)(flavor)) >> SEC_FLAVOR_MECH_OFFSET) & 0xFF)
#define SEC_FLAVOR_POLICY(flavor) \
- ((((__u32)(flavor)) >> SEC_FLAVOR_POLICY_OFFSET) & 0x3F)
-#define SEC_FLAVOR_SUBPOLICY(flavor) \
- ((((__u32)(flavor)) >> SEC_FLAVOR_SUBPOLICY_OFFSET) & 0x3F)
+ ((((__u32)(flavor)) >> SEC_FLAVOR_POLICY_OFFSET) & 0xF)
+#define SEC_FLAVOR_MECH(flavor) \
+ ((((__u32)(flavor)) >> SEC_FLAVOR_MECH_OFFSET) & 0xF)
#define SEC_FLAVOR_SVC(flavor) \
((((__u32)(flavor)) >> SEC_FLAVOR_SVC_OFFSET) & 0xF)
-#define SEC_FLAVOR_SUB(flavor) \
- ((((__u32)(flavor)) >> SEC_FLAVOR_SVC_OFFSET) & 0x3FF)
#define SEC_FLAVOR_RPC(f) \
- (((__u32) f) & ((1 << SEC_FLAVOR_FLAGS_OFFSET) - 1))
-
-/*
- * general gss flavors
- */
-#define SPTLRPC_FLVR_GSS_NONE \
- SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_SUBPOLICY_GSS_NONE, \
- SPTLRPC_SVC_NONE)
-#define SPTLRPC_FLVR_GSS_AUTH \
- SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_SUBPOLICY_GSS_NONE, \
- SPTLRPC_SVC_AUTH)
-#define SPTLRPC_FLVR_GSS_PRIV \
- SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_SUBPOLICY_GSS_NONE, \
- SPTLRPC_SVC_PRIV)
+ (((__u32) (f)) & ((1 << SEC_FLAVOR_RESERVE1_OFFSET) - 1))
/*
* gss subflavors
*/
-#define SPTLRPC_SUBFLVR_KRB5 \
- SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_SUBPOLICY_GSS_KRB5, \
- SPTLRPC_SVC_NONE)
-#define SPTLRPC_SUBFLVR_KRB5I \
- SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_SUBPOLICY_GSS_KRB5, \
+#define SPTLRPC_SUBFLVR_KRB5N \
+ SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_NULL)
+#define SPTLRPC_SUBFLVR_KRB5A \
+ SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_SVC_AUTH)
+#define SPTLRPC_SUBFLVR_KRB5I \
+ SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_KRB5P \
- SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_SUBPOLICY_GSS_KRB5, \
+ SEC_MAKE_RPC_SUBFLAVOR(SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_SVC_PRIV)
/*
*/
#define SPTLRPC_FLVR_NULL \
SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_NULL, \
- SPTLRPC_SUBPOLICY_NULL, \
- SPTLRPC_SVC_NONE)
+ SPTLRPC_MECH_NULL, \
+ SPTLRPC_SVC_NULL)
#define SPTLRPC_FLVR_PLAIN \
SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_PLAIN, \
- SPTLRPC_SUBPOLICY_PLAIN, \
- SPTLRPC_SVC_NONE)
-#define SPTLRPC_FLVR_KRB5 \
+ SPTLRPC_MECH_PLAIN, \
+ SPTLRPC_SVC_NULL)
+#define SPTLRPC_FLVR_KRB5N \
SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_SUBPOLICY_GSS_KRB5, \
- SPTLRPC_SVC_NONE)
-#define SPTLRPC_FLVR_KRB5I \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_NULL)
+#define SPTLRPC_FLVR_KRB5A \
SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_SUBPOLICY_GSS_KRB5, \
+ SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_SVC_AUTH)
+#define SPTLRPC_FLVR_KRB5I \
+ SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_INTG)
#define SPTLRPC_FLVR_KRB5P \
SEC_MAKE_RPC_FLAVOR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_SUBPOLICY_GSS_KRB5, \
+ SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_SVC_PRIV)
#define SPTLRPC_FLVR_INVALID (-1)
PTLRPC_CTX_ERROR)
struct ptlrpc_cli_ctx {
- struct hlist_node cc_hash; /* linked into hash table */
+ struct hlist_node cc_cache; /* linked into ctx cache */
atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
cfs_time_t cc_expire; /* in seconds */
+ unsigned int cc_early_expire:1;
unsigned long cc_flags;
struct vfs_cred cc_vcred;
spinlock_t cc_lock;
struct list_head cc_req_list; /* waiting reqs linked here */
+ struct list_head cc_gc_chain; /* linked to gc chain */
};
struct ptlrpc_sec_cops {
cfs_time_t ps_gc_next; /* in seconds */
};
+static inline int sec_is_reverse(struct ptlrpc_sec *sec)
+{
+ return (sec->ps_flags & PTLRPC_SEC_FL_REVERSE);
+}
+
+static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
+{
+ return (sec->ps_flags & PTLRPC_SEC_FL_ROOTONLY);
+}
+
+
struct ptlrpc_svc_ctx {
atomic_t sc_refcount;
struct ptlrpc_sec_policy *sc_policy;
}
static inline
-int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
+int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
{
return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
}
}
static inline
+int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
+{
+ return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
+}
+
+static inline
+int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
+{
+ return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
+}
+
+static inline
int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
{
return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req);
+void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
int sptlrpc_parse_flavor(enum lustre_part from, enum lustre_part to,
char *str, struct sec_flavor_config *conf);
+/* gc */
+void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
+void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
+void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
+
/* misc */
const char * sec2target_str(struct ptlrpc_sec *sec);
int sptlrpc_lprocfs_rd(char *page, char **start, off_t off, int count,
SECSVC_DROP,
};
+int sptlrpc_target_export_check(struct obd_export *exp,
+ struct ptlrpc_request *req);
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
struct lustre_msg *rmsg, int roff,
struct lustre_msg *vmsg, int voff);
int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
- struct lustre_msg *vmsg, int voff,
- struct lustre_msg *rmsg, int roff);
+ struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
+ struct ptlrpc_bulk_sec_desc *bsdr, int rsize);
#endif /* _LUSTRE_SEC_H_ */
req->rq_self,
&remote_uuid);
- if (lustre_msg_get_op_flags(req->rq_repmsg) & MSG_CONNECT_RECONNECT) {
- LASSERT(export->exp_imp_reverse);
- sptlrpc_svc_install_rvs_ctx(export->exp_imp_reverse,
- req->rq_svc_ctx);
- GOTO(out, rc = 0);
- }
-
spin_lock_bh(&target->obd_processing_task_lock);
if (target->obd_recovering && !export->exp_in_recovery) {
spin_lock(&export->exp_lock);
struct obd_export *exp;
struct ptlrpc_service *svc;
+ if (req->rq_no_reply)
+ return;
+
svc = req->rq_rqbd->rqbd_service;
rs = req->rq_reply_state;
if (rs == NULL || !rs->rs_difficult) {
rc = llog_origin_handle_close(req);
ldlm_callback_reply(req, rc);
RETURN(0);
+ case SEC_CTX_FINI:
+ /* do nothing */
+ RETURN(0);
default:
CERROR("unknown opcode %u\n",
lustre_msg_get_opc(req->rq_reqmsg));
aa->aa_ppga)))
RETURN(-EAGAIN);
- sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk);
+ if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
+ RETURN(-EAGAIN);
rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
aa->aa_page_count, aa->aa_ppga);
if (rc < aa->aa_requested_nob)
handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
- sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count, aa->aa_ppga);
+ if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count,
+ aa->aa_ppga))
+ GOTO(out, rc = -EAGAIN);
if (unlikely(body->oa.o_valid & OBD_MD_FLCKSUM)) {
static int cksum_counter;
}
no_reply = rc != 0;
+ if (rc == 0) {
+ /* let client retry if unwrap failed */
+ rc = sptlrpc_svc_unwrap_bulk(req, desc);
+ }
+
repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(*repbody));
memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa));
}
}
- sptlrpc_svc_unwrap_bulk(req, desc);
-
- /* Check if there is eviction in progress, and if so, wait for
- * it to finish */
- if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
- lwi = LWI_INTR(NULL, NULL);
- rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->obd_evict_inprogress),
- &lwi);
- }
- if (rc == 0 && exp->exp_failed)
- rc = -ENOTCONN;
-
/* Must commit after prep above in all cases */
rc = obd_commitrw(OBD_BRW_WRITE, exp, &repbody->oa,
objcount, ioo, npages, local_nb, oti, rc);
RETURN(request);
out_ctx:
- sptlrpc_req_put_ctx(request);
+ sptlrpc_req_put_ctx(request, 1);
out_free:
class_import_put(imp);
if (request->rq_pool)
if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
sptlrpc_cli_free_reqbuf(request);
- sptlrpc_req_put_ctx(request);
+ sptlrpc_req_put_ctx(request, !locked);
if (request->rq_pool)
__ptlrpc_free_req_to_pool(request);
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
+ case SPTLRPC_SVC_NULL:
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
+ msg = req->rq_reqbuf;
+ offset = msg->lm_bufcount - 1;
+ break;
case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
msg = req->rq_reqbuf;
offset = msg->lm_bufcount - 2;
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
+ case SPTLRPC_SVC_NULL:
+ vmsg = req->rq_repbuf;
+ voff = vmsg->lm_bufcount - 1;
+ LASSERT(vmsg && vmsg->lm_bufcount >= 3);
+
+ rmsg = req->rq_reqbuf;
+ roff = rmsg->lm_bufcount - 1; /* last segment */
+ LASSERT(rmsg && rmsg->lm_bufcount >= 3);
+ break;
case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
vmsg = req->rq_repbuf;
voff = vmsg->lm_bufcount - 2;
LASSERT(vmsg && vmsg->lm_bufcount >= 4);
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct gss_svc_reqctx *grctx;
- struct ptlrpc_bulk_sec_desc *bsdv;
- int voff, roff, rc;
+ int rc;
ENTRY;
- LASSERT(rs);
+ LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_bulk_write);
- if (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV) {
- LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
- LASSERT(rs->rs_repbuf->lm_bufcount >= 2);
- voff = req->rq_reqbuf->lm_bufcount - 1;
- roff = rs->rs_repbuf->lm_bufcount - 1;
- } else {
- LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
- LASSERT(rs->rs_repbuf->lm_bufcount >= 4);
- voff = req->rq_reqbuf->lm_bufcount - 2;
- roff = rs->rs_repbuf->lm_bufcount - 2;
- }
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- bsdv = lustre_msg_buf(req->rq_reqbuf, voff, sizeof(*bsdv));
- if (bsdv->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
- grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- LASSERT(grctx->src_ctx);
- LASSERT(grctx->src_ctx->gsc_mechctx);
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+ /* decrypt bulk data if it's encrypted */
+ if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
- bsdv->bsd_priv_alg, bsdv);
+ grctx->src_reqbsd->bsd_priv_alg,
+ grctx->src_reqbsd);
if (rc) {
CERROR("bulk write: server failed to decrypt data\n");
RETURN(rc);
}
}
+ /* verify bulk data checksum */
rc = bulk_csum_svc(desc, req->rq_bulk_read,
- req->rq_reqbuf, voff, rs->rs_repbuf, roff);
+ grctx->src_reqbsd, grctx->src_reqbsd_size,
+ grctx->src_repbsd, grctx->src_repbsd_size);
RETURN(rc);
}
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct gss_svc_reqctx *grctx;
- struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
- int voff, roff, rc;
+ int rc;
ENTRY;
- LASSERT(rs);
+ LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_bulk_read);
- if (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV) {
- voff = req->rq_reqbuf->lm_bufcount - 1;
- roff = rs->rs_repbuf->lm_bufcount - 1;
- } else {
- voff = req->rq_reqbuf->lm_bufcount - 2;
- roff = rs->rs_repbuf->lm_bufcount - 2;
- }
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+ /* generate bulk data checksum */
rc = bulk_csum_svc(desc, req->rq_bulk_read,
- req->rq_reqbuf, voff, rs->rs_repbuf, roff);
+ grctx->src_reqbsd, grctx->src_reqbsd_size,
+ grctx->src_repbsd, grctx->src_repbsd_size);
if (rc)
RETURN(rc);
- bsdv = lustre_msg_buf(req->rq_reqbuf, voff, sizeof(*bsdv));
- if (bsdv->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
- grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- LASSERT(grctx->src_ctx);
- LASSERT(grctx->src_ctx->gsc_mechctx);
-
- bsdr = lustre_msg_buf(rs->rs_repbuf, roff, sizeof(*bsdr));
-
+ /* encrypt bulk data if required */
+ if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
- bsdv->bsd_priv_alg, bsdr);
+ grctx->src_reqbsd->bsd_priv_alg,
+ grctx->src_repbsd);
if (rc)
- CERROR("bulk read: server failed to encrypt data\n");
+ CERROR("bulk read: server failed to encrypt data: "
+ "rc %d\n", rc);
}
RETURN(rc);
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
ghdr->gh_flags = 0;
ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
ghdr->gh_seq = 0;
- ghdr->gh_svc = PTLRPC_GSS_SVC_NONE;
+ ghdr->gh_svc = SPTLRPC_SVC_NULL;
ghdr->gh_handle.len = 0;
/* fix the user desc */
int rc;
ENTRY;
- if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
- CWARN("ctx %p(%u) is reverse, don't send destroy rpc\n",
- ctx, ctx->cc_vcred.vc_uid);
- RETURN(0);
- }
-
- /* FIXME
- * this could be called when import being tearing down, thus import's
- * spinlock is held. A more clean solution might be: let gss worker
- * thread handle the ctx destroying; don't wait reply for fini rpc.
- */
- if (imp->imp_invalid) {
- CWARN("ctx %p(%u): skip because import is invalid\n",
- ctx, ctx->cc_vcred.vc_uid);
- RETURN(0);
- }
- RETURN(0); // XXX remove after using gss worker thread
-
- if (test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ||
- !test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags)) {
- CWARN("ctx %p(%u->%s) already dead, don't send destroy rpc\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
+ CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
+ "don't send destroy rpc\n", ctx,
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
RETURN(0);
}
might_sleep();
- CWARN("client destroy ctx %p(%u->%s)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ CDEBUG(D_SEC, "%s ctx %p(%u->%s)\n",
+ sec_is_reverse(ctx->cc_sec) ?
+ "server finishing reverse" : "client finishing forward",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
/* context's refcount could be 0, steal one */
atomic_inc(&ctx->cc_refcount);
pud->pud_ngroups = 0;
}
- req->rq_replen = lustre_msg_size_v2(1, &buflens);
-
- rc = ptlrpc_queue_wait(req);
+ req->rq_phase = RQ_PHASE_RPC;
+ rc = ptl_send_rpc(req, 1);
if (rc) {
- CWARN("ctx %p(%u): rpc error %d, destroy locally\n",
- ctx, ctx->cc_vcred.vc_uid, rc);
+ CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ rc);
}
ptlrpc_req_finished(req);
return expiry;
}
-/* we try to force reconnect import 20m eariler than real expiry.
- * kerberos 5 usually allow 5m time skew, but which is adjustable,
- * so if we set krb5 to allow > 20m time skew, we have chance that
- * server's reverse ctx expired but client still hasn't start to
- * refresh it -- it's BAD. So here we actually put a limit on the
- * enviroment of krb5 (or other authentication mechanism)
- */
-#define GSS_MAX_TIME_SKEW (20 * 60)
-
-static inline
-unsigned long gss_round_imp_reconnect(unsigned long expiry)
-{
- unsigned long now = get_seconds();
- unsigned long nice = GSS_MAX_TIME_SKEW + __TIMEOUT_DELTA;
-
- while (nice && (now + nice >= expiry))
- nice = nice / 2;
-
- return (expiry - nice);
-}
-
/*
* Max encryption element in block cipher algorithms.
*/
PTLRPC_GSS_PROC_ERR = 4,
};
-enum ptlrpc_gss_svc {
- PTLRPC_GSS_SVC_NONE = 1,
- PTLRPC_GSS_SVC_INTEGRITY = 2,
- PTLRPC_GSS_SVC_PRIVACY = 3,
-};
-
enum ptlrpc_gss_tgt {
LUSTRE_GSS_TGT_MDS = 0,
LUSTRE_GSS_TGT_OSS = 1,
+ LUSTRE_GSS_TGT_MGS = 2,
};
static inline
};
struct gss_svc_reqctx {
- struct ptlrpc_svc_ctx src_base;
- struct gss_wire_ctx src_wirectx;
- struct gss_svc_ctx *src_ctx;
- unsigned int src_init:1,
- src_init_continue:1,
- src_err_notify:1;
- int src_reserve_len;
+ struct ptlrpc_svc_ctx src_base;
+ /*
+ * context
+ */
+ struct gss_wire_ctx src_wirectx;
+ struct gss_svc_ctx *src_ctx;
+ /*
+ * record place of bulk_sec_desc in request/reply buffer
+ */
+ struct ptlrpc_bulk_sec_desc *src_reqbsd;
+ int src_reqbsd_size;
+ struct ptlrpc_bulk_sec_desc *src_repbsd;
+ int src_repbsd_size;
+ /*
+ * flags
+ */
+ unsigned int src_init:1,
+ src_init_continue:1,
+ src_err_notify:1;
+ int src_reserve_len;
};
struct gss_cli_ctx {
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx);
+void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize);
+
/* gss_keyring.c */
extern struct ptlrpc_sec_policy gss_policy_keyring;
int __init gss_init_keyring(void);
LASSERT(timer);
- CWARN("ctx %p: start timer %lds\n", ctx, timeout);
+ CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
timeout = timeout * HZ + cfs_time_current();
init_timer(timer);
add_timer(timer);
}
+/*
+ * caller should make sure no race with other threads
+ */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
struct timer_list *timer = gctx_kr->gck_timer;
- CWARN("ctx %p, key %p\n", ctx, gctx_kr->gck_key);
if (timer == NULL)
return;
+ CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);
+
gctx_kr->gck_timer = NULL;
del_singleshot_timer_sync(timer);
struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
int rc;
- CWARN("destroying ctx %p\n", ctx);
+ CDEBUG(D_SEC, "destroying ctx %p\n", ctx);
/* at this time the association with key has been broken. */
LASSERT(sec);
atomic_inc(&ctx->cc_refcount);
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- hlist_add_head(&ctx->cc_hash, &gsec_kr->gsk_clist);
+ hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
if (is_root)
gsec_kr->gsk_root_ctx = ctx;
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
return 0;
- CWARN("ctx %p(%d) unlist\n", ctx, atomic_read(&ctx->cc_refcount));
-
/*
* drop ref inside spin lock to prevent race with other operations
*/
if (gsec_kr->gsk_root_ctx == ctx)
gsec_kr->gsk_root_ctx = NULL;
- hlist_del_init(&ctx->cc_hash);
+ hlist_del_init(&ctx->cc_cache);
atomic_dec(&ctx->cc_refcount);
spin_unlock_if(&sec->ps_lock, !locked);
}
/*
- * since this called, nobody else could touch the ctx in @freelist
+ * caller should hold one ref on contexts in freelist.
*/
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
struct hlist_node *pos, *next;
struct ptlrpc_cli_ctx *ctx;
- hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_hash) {
- hlist_del_init(&ctx->cc_hash);
+ hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
+ hlist_del_init(&ctx->cc_cache);
+
+ /*
+ * we need to wakeup waiting reqs here. the context might
+ * be forced released before upcall finished, then the
+ * late-arrived downcall can't find the ctx even.
+ */
+ sptlrpc_cli_ctx_wakeup(ctx);
- atomic_inc(&ctx->cc_refcount);
unbind_ctx_kr(ctx);
ctx_put_kr(ctx);
}
spin_lock(&sec->ps_lock);
ctx = gsec_kr->gsk_root_ctx;
+
+ if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
+ struct hlist_node *node;
+ struct ptlrpc_cli_ctx *tmp;
+ /*
+ * reverse ctx, search root ctx in list, choose the one
+ * with shortest expire time, which is most possibly have
+ * an established peer ctx at client side.
+ */
+ hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
+ if (ctx == NULL || ctx->cc_expire == 0 ||
+ ctx->cc_expire > tmp->cc_expire) {
+ ctx = tmp;
+ /* promote to be root_ctx */
+ gsec_kr->gsk_root_ctx = ctx;
+ }
+ }
+ }
+
if (ctx) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
return ctx;
}
-static void sec_replace_root_ctx_kr(struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *new_ctx,
- struct key *key)
+#define RVS_CTX_EXPIRE_NICE (10)
+
+static
+void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *new_ctx,
+ struct key *key)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct ptlrpc_cli_ctx *root_ctx;
- struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_node *hnode;
+ struct ptlrpc_cli_ctx *ctx;
+ cfs_time_t now;
ENTRY;
- spin_lock(&sec->ps_lock);
+ LASSERT(sec_is_reverse(sec));
- if (gsec_kr->gsk_root_ctx) {
- root_ctx = gsec_kr->gsk_root_ctx;
+ spin_lock(&sec->ps_lock);
- set_bit(PTLRPC_CTX_DEAD_BIT, &root_ctx->cc_flags);
+ now = cfs_time_current_sec();
- if (ctx_unlist_kr(root_ctx, 1))
- hlist_add_head(&root_ctx->cc_hash, &freelist);
+ /* set all existing ctxs short expiry */
+ hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
+ if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
+ ctx->cc_early_expire = 1;
+ ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
+ }
}
- /*
- * at this time, we can't guarantee the gsk_root_ctx is NULL, because
- * another thread might clear the HASHED flag of root ctx earlier,
- * and waiting for spinlock which is held by us. But anyway we just
- * install the new root ctx.
+ /* if there's root_ctx there, instead obsolete the current
+ * immediately, we leave it continue operating for a little while.
+ * hopefully when the first backward rpc with newest ctx send out,
+ * the client side already have the peer ctx well established.
*/
- ctx_enlist_kr(new_ctx, 1, 1);
+ ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);
if (key)
bind_key_ctx(key, new_ctx);
spin_unlock(&sec->ps_lock);
-
- dispose_ctx_list_kr(&freelist);
}
static void construct_key_desc(void *buf, int bufsize,
RETURN(NULL);
gsec_kr->gsk_id = atomic_inc_return(&gss_sec_id_kr);
- INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
+ CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
struct gss_sec *gsec = sec2gsec(sec);
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- CWARN("destroy %s@%p\n", sec->ps_policy->sp_name, sec);
+ CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
LASSERT(hlist_empty(&gsec_kr->gsk_clist));
LASSERT(gsec_kr->gsk_root_ctx == NULL);
* Only lookup directly for REVERSE sec, which should
* always succeed.
*/
- if (ctx || (sec->ps_flags & PTLRPC_SEC_FL_REVERSE))
+ if (ctx || sec_is_reverse(sec))
RETURN(ctx);
}
ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);
- CWARN("installed key %p <-> ctx %p (sec %p)\n",
- key, ctx, sec);
+ CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
+ key, ctx, sec);
} else {
/*
* we'd prefer to call key_revoke(), but we more like
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- CWARN("ctx %p\n", ctx);
- ctx_destroy_kr(ctx);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+
+ if (sync)
+ ctx_destroy_kr(ctx);
+ else {
+ atomic_inc(&ctx->cc_refcount);
+ sptlrpc_gc_add_ctx(ctx);
+ }
}
/*
char desc[24];
/* nothing to do for reverse or rootonly sec */
- if (sec->ps_flags & (PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY))
+ if (sec_is_reverse(sec) || sec_is_rootonly(sec))
return;
construct_key_desc(desc, sizeof(desc), sec, uid);
down_write(&key->sem);
- CWARN("invalidating key %p - ctx %p\n", key, key->payload.data);
kill_key_locked(key);
/* kill_key_locked() should usually revoke the key, but we
int grace, int force)
{
struct gss_sec_keyring *gsec_kr;
- struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
struct hlist_node *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
spin_lock(&sec->ps_lock);
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_hash) {
+ &gsec_kr->gsk_clist, cc_cache) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
if (!grace)
clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- if (ctx_unlist_kr(ctx, 1)) {
- hlist_add_head(&ctx->cc_hash, &freelist);
- CWARN("unlisted ctx %p\n", ctx);
- } else
- CWARN("ctx %p: unlist return 0, let it go\n", ctx);
+ atomic_inc(&ctx->cc_refcount);
+
+ if (ctx_unlist_kr(ctx, 1))
+ hlist_add_head(&ctx->cc_cache, &freelist);
+ else {
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+ atomic_dec(&ctx->cc_refcount);
+ }
}
spin_unlock(&sec->ps_lock);
{
ENTRY;
- CWARN("sec %p(%d, busy %d), uid %d, grace %d, force %d\n",
- sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_busy),
- uid, grace, force);
+ CDEBUG(D_SEC, "sec %p(%d, busy %d), uid %d, grace %d, force %d\n",
+ sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_busy),
+ uid, grace, force);
if (uid != -1 && uid != 0)
flush_user_ctx_cache_kr(sec, uid, grace, force);
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
struct hlist_node *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
spin_lock(&sec->ps_lock);
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_hash) {
+ &gsec_kr->gsk_clist, cc_cache) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ atomic_inc(&ctx->cc_refcount);
+
if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- hlist_add_head(&ctx->cc_hash, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
CWARN("unhashed ctx %p\n", ctx);
+ } else {
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+ atomic_dec(&ctx->cc_refcount);
}
}
spin_unlock(&sec->ps_lock);
spin_lock(&sec->ps_lock);
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_hash) {
- struct key *key;
- int len;
+ &gsec_kr->gsk_clist, cc_cache) {
+ struct gss_cli_ctx *gctx;
+ struct key *key;
+ char flags_str[40];
+ int len;
+ gctx = ctx2gctx(ctx);
key = ctx2gctx_keyring(ctx)->gck_key;
- len = snprintf(buf, bufsize, "%p(%d): expire %ld(%ld), "
- "uid %u, flags 0x%lx, key %08x(%d)\n",
+ gss_cli_ctx_flags2str(ctx->cc_flags,
+ flags_str, sizeof(flags_str));
+
+ len = snprintf(buf, bufsize, "%p(%d): uid %u, exp %ld(%ld)s, "
+ "fl %s, seq %d, win %u, key %08x(%d), ",
ctx, atomic_read(&ctx->cc_refcount),
+ ctx->cc_vcred.vc_uid,
ctx->cc_expire,
ctx->cc_expire - cfs_time_current_sec(),
- ctx->cc_vcred.vc_uid,
- ctx->cc_flags,
+ flags_str,
+ atomic_read(&gctx->gc_seq),
+ gctx->gc_win,
key ? key->serial : 0,
key ? atomic_read(&key->usage) : 0);
buf += len;
bufsize -= len;
- if (bufsize < len)
+ if (bufsize <= 0)
+ break;
+
+ if (gctx->gc_mechctx)
+ len = lgss_display(gctx->gc_mechctx, buf, bufsize);
+ else
+ len = snprintf(buf, bufsize, "mech N/A\n");
+
+ written += len;
+ buf += len;
+ bufsize -= len;
+
+ if (bufsize <= 0)
break;
}
spin_unlock(&sec->ps_lock);
return 1;
}
- if (cli_ctx_is_uptodate(ctx))
+ if (cli_ctx_is_ready(ctx))
return 0;
return 1;
}
return rc;
}
- sec_replace_root_ctx_kr(sec, cli_ctx, NULL);
+ rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);
ctx_put_kr(cli_ctx);
goto err_put;
}
- sec_replace_root_ctx_kr(sec, cli_ctx, key);
+ rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);
ctx_put_kr(cli_ctx);
up_write(&key->sem);
/* XXX */
key->perm |= KEY_POS_ALL | KEY_USR_ALL;
- CWARN("key %p instantiated, ctx %p\n", key, key->payload.data);
+ CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
RETURN(0);
}
goto out;
}
- CWARN("secwin is %d\n", gctx->gc_win);
if (gctx->gc_win == 0) {
__u32 nego_rpc_err, nego_gss_err;
if (rc == 0) {
gss_cli_ctx_uptodate(gctx);
} else {
+ /*
+ * this will also revoke the key. has to be done before
+ * wakeup waiters otherwise they can find the stale key
+ */
+ kill_key_locked(key);
+
cli_ctx_expire(ctx);
if (rc != -ERESTART)
set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
-
- /* this will also revoke the key. has to be done before
- * wakeup waiters otherwise they can find the stale key
- */
- kill_key_locked(key);
}
sptlrpc_cli_ctx_wakeup(ctx);
{
ENTRY;
LASSERT(key->payload.data == NULL);
- CWARN("destroy key %p\n", key);
+ CDEBUG(D_SEC, "destroy key %p\n", key);
EXIT;
}
.refresh = gss_cli_ctx_refresh_kr,
.validate = gss_cli_ctx_validate_kr,
.die = gss_cli_ctx_die_kr,
- .display = gss_cli_ctx_display,
.sign = gss_cli_ctx_sign,
.verify = gss_cli_ctx_verify,
.seal = gss_cli_ctx_seal,
knew->kc_cfx = kctx->kc_cfx;
knew->kc_seed_init = kctx->kc_seed_init;
knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
-#if 0
knew->kc_endtime = kctx->kc_endtime;
-#else
- /* FIXME reverse context don't expire for now */
- knew->kc_endtime = INT_MAX;
-#endif
+
memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
knew->kc_seq_send = kctx->kc_seq_recv;
knew->kc_seq_recv = kctx->kc_seq_send;
struct krb5_ctx *kctx = ctx->internal_ctx_id;
int written;
- written = snprintf(buf, bufsize,
- " mech: krb5\n"
- " enctype: %s\n",
- enctype2str(kctx->kc_enctype));
+ written = snprintf(buf, bufsize, "mech: krb5 (%s)\n",
+ enctype2str(kctx->kc_enctype));
return written;
}
static struct subflavor_desc gss_kerberos_sfs[] = {
{
- .sf_subflavor = SPTLRPC_SUBFLVR_KRB5,
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
.sf_qop = 0,
- .sf_service = SPTLRPC_SVC_NONE,
- .sf_name = "krb5"
+ .sf_service = SPTLRPC_SVC_NULL,
+ .sf_name = "krb5n"
},
{
- .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
.sf_qop = 0,
.sf_service = SPTLRPC_SVC_AUTH,
+ .sf_name = "krb5a"
+ },
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_INTG,
.sf_name = "krb5i"
},
{
.gm_oid = (rawobj_t)
{9, "\052\206\110\206\367\022\001\002\002"},
.gm_ops = &gss_kerberos_ops,
- .gm_sf_num = 3,
+ .gm_sf_num = 4,
.gm_sfs = gss_kerberos_sfs,
};
#include "gss_internal.h"
#include "gss_api.h"
-static LIST_HEAD(registered_mechs);
+static CFS_LIST_HEAD(registered_mechs);
static spinlock_t registered_mechs_lock = SPIN_LOCK_UNLOCKED;
int lgss_mech_register(struct gss_api_mech *gm)
{
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
atomic_inc(&ctx->cc_refcount);
- hlist_add_head(&ctx->cc_hash, hash);
+ hlist_add_head(&ctx->cc_cache, hash);
}
/*
LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!hlist_unhashed(&ctx->cc_hash));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __hlist_del(&ctx->cc_hash);
- hlist_add_head(&ctx->cc_hash, freelist);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, freelist);
} else
- hlist_del_init(&ctx->cc_hash);
+ hlist_del_init(&ctx->cc_cache);
}
/*
struct ptlrpc_cli_ctx *ctx;
while (!hlist_empty(head)) {
- ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_hash);
+ ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache);
LASSERT(atomic_read(&ctx->cc_refcount) == 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- hlist_del_init(&ctx->cc_hash);
+ hlist_del_init(&ctx->cc_cache);
ctx_destroy_pf(ctx->cc_sec, ctx);
}
}
{
if (ctx_check_death_pf(ctx, NULL))
return 1;
- if (cli_ctx_is_uptodate(ctx))
+ if (cli_ctx_is_ready(ctx))
return 0;
return 1;
}
spin_lock(&ctx->cc_sec->ps_lock);
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!hlist_unhashed(&ctx->cc_hash));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
LASSERT(atomic_read(&ctx->cc_refcount) > 1);
- hlist_del_init(&ctx->cc_hash);
+ hlist_del_init(&ctx->cc_cache);
if (atomic_dec_and_test(&ctx->cc_refcount))
LBUG();
}
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ CFS_HLIST_HEAD(freelist);
unsigned int hash;
ENTRY;
spin_lock(&gsec->gs_base.ps_lock);
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[hash], cc_hash) {
+ &gsec_pf->gsp_chash[hash], cc_cache) {
if (!ctx_match_pf(ctx, &new->cc_vcred))
continue;
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_hash)
+ &gsec_pf->gsp_chash[i], cc_cache)
ctx_check_death_locked_pf(ctx, freelist);
}
gsec_pf->gsp_chash_size = hash_size;
for (i = 0; i < hash_size; i++)
- INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+ CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
imp, ctx, flavor, flags))
struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
struct hlist_head *hash_head;
struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ CFS_HLIST_HEAD(freelist);
unsigned int hash, gc = 0, found = 0;
ENTRY;
gc = 1;
}
- hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_hash) {
+ hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
ctx_check_death_locked_pf(ctx,
remove_dead ? &freelist : NULL))
if (found) {
if (new && new != ctx) {
/* lost the race, just free it */
- hlist_add_head(&new->cc_hash, &freelist);
+ hlist_add_head(&new->cc_cache, &freelist);
new = NULL;
}
/* hot node, move to head */
- if (hash_head->first != &ctx->cc_hash) {
- __hlist_del(&ctx->cc_hash);
- hlist_add_head(&ctx->cc_hash, hash_head);
+ if (hash_head->first != &ctx->cc_cache) {
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, hash_head);
}
} else {
/* don't allocate for reverse sec */
- if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
+ if (sec_is_reverse(sec)) {
spin_unlock(&sec->ps_lock);
RETURN(NULL);
}
int sync)
{
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(hlist_unhashed(&ctx->cc_hash));
+ LASSERT(hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
* rpcs during destroy procedure.
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ CFS_HLIST_HEAD(freelist);
int i, busy = 0;
ENTRY;
spin_lock(&sec->ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_hash) {
+ &gsec_pf->gsp_chash[i], cc_cache) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
RETURN(-ENOMEM);
/* initialize pipefs base msg */
- INIT_LIST_HEAD(&gmsg->gum_base.list);
+ CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
gmsg->gum_base.data = &gmsg->gum_data;
gmsg->gum_base.len = sizeof(gmsg->gum_data);
gmsg->gum_base.copied = 0;
.refresh = gss_cli_ctx_refresh_pf,
.validate = gss_cli_ctx_validate_pf,
.die = gss_cli_ctx_die_pf,
- .display = gss_cli_ctx_display,
.sign = gss_cli_ctx_sign,
.verify = gss_cli_ctx_verify,
.seal = gss_cli_ctx_seal,
}
de_pipes[MECH_KRB5] = de;
- INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
+ CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
upcall_locks[MECH_KRB5] = SPIN_LOCK_UNLOCKED;
return 0;
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2004 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
static struct cache_detail rsi_cache = {
.hash_size = RSI_HASHMAX,
.hash_table = rsi_table,
- .name = "auth.ptlrpcs.init",
+ .name = "auth.sptlrpc.init",
.cache_put = rsi_put,
.cache_request = rsi_request,
.cache_parse = rsi_parse,
static struct cache_detail rsc_cache = {
.hash_size = RSC_HASHMAX,
.hash_table = rsc_table,
- .name = "auth.ptlrpcs.context",
+ .name = "auth.sptlrpc.context",
.cache_put = rsc_put,
.cache_parse = rsc_parse,
};
if (rscp)
rsc_put(&rscp->h, &rsc_cache);
- CWARN("client installed reverse svc ctx to %s: idx "LPX64"\n",
- imp->imp_obd->u.cli.cl_target_uuid.uuid,
- gsec->gs_rvs_hdl);
-
- imp->imp_next_reconnect = gss_round_imp_reconnect(ctx_expiry);
- CWARN("import(%s) to %s: set force reconnect at %lu(%lds valid time)\n",
- ptlrpc_import_state_name(imp->imp_state),
- imp->imp_obd->u.cli.cl_target_uuid.uuid,
- imp->imp_next_reconnect,
- (long) (imp->imp_next_reconnect - get_seconds()));
+ CDEBUG(D_SEC, "client installed reverse svc ctx to %s: idx "LPX64"\n",
+ imp->imp_obd->u.cli.cl_target_uuid.uuid, gsec->gs_rvs_hdl);
RETURN(0);
}
-#if 0
-static int
-gss_svc_unseal_request(struct ptlrpc_request *req,
- struct rsc *rsci,
- struct gss_wire_cred *gc,
- __u32 *vp, __u32 vlen)
-{
- struct ptlrpcs_wire_hdr *sec_hdr;
- struct gss_ctx *ctx = rsci->mechctx;
- rawobj_t cipher_text, plain_text;
- __u32 major;
- ENTRY;
-
- sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;
-
- if (vlen < 4) {
- CERROR("vlen only %u\n", vlen);
- RETURN(GSS_S_CALL_BAD_STRUCTURE);
- }
-
- cipher_text.len = le32_to_cpu(*vp++);
- cipher_text.data = (__u8 *) vp;
- vlen -= 4;
-
- if (cipher_text.len > vlen) {
- CERROR("cipher claimed %u while buf only %u\n",
- cipher_text.len, vlen);
- RETURN(GSS_S_CALL_BAD_STRUCTURE);
- }
-
- plain_text = cipher_text;
-
- major = lgss_unwrap(ctx, GSS_C_QOP_DEFAULT, &cipher_text, &plain_text);
- if (major) {
- CERROR("unwrap error 0x%x\n", major);
- RETURN(major);
- }
-
- if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
- CERROR("discard replayed request %p(o%u,x"LPU64",t"LPU64")\n",
- req, req->rq_reqmsg->opc, req->rq_xid,
- req->rq_reqmsg->transno);
- RETURN(GSS_S_DUPLICATE_TOKEN);
- }
-
- req->rq_reqmsg = (struct lustre_msg *) (vp);
- req->rq_reqlen = plain_text.len;
-
- CDEBUG(D_SEC, "msg len %d\n", req->rq_reqlen);
-
- RETURN(GSS_S_COMPLETE);
-}
-#endif
-
static
struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
{
rsci->target = target;
- CWARN("server create rsc %p(%u->%s)\n",
- rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+ CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
+ rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
CERROR("handle size %u too large\n", rsip->out_handle.len);
rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
rsip->out_token.len, 0);
- if (rsci->ctx.gsc_usr_mds)
- CWARN("user from %s authenticated as mds\n",
- libcfs_nid2str(req->rq_peer.nid));
-
rc = SECSVC_OK;
out:
#include <linux/crypto.h>
+
+static inline int msg_last_segidx(struct lustre_msg *msg)
+{
+ LASSERT(msg->lm_bufcount > 0);
+ return msg->lm_bufcount - 1;
+}
+static inline int msg_last_seglen(struct lustre_msg *msg)
+{
+ return msg->lm_buflens[msg_last_segidx(msg)];
+}
+
/********************************************
* wire data swabber *
********************************************/
static
int gss_sign_msg(struct lustre_msg *msg,
struct gss_ctx *mechctx,
- __u32 proc, __u32 seq,
+ __u32 proc, __u32 seq, __u32 svc,
rawobj_t *handle)
{
struct gss_header *ghdr;
rawobj_t text[3], mic;
- int textcnt, mic_idx = msg->lm_bufcount - 1;
+ int textcnt, max_textcnt, mic_idx;
__u32 major;
- LASSERT(msg->lm_bufcount >= 3);
+ LASSERT(msg->lm_bufcount >= 2);
/* gss hdr */
LASSERT(msg->lm_buflens[0] >=
ghdr->gh_flags = 0;
ghdr->gh_proc = proc;
ghdr->gh_seq = seq;
- ghdr->gh_svc = PTLRPC_GSS_SVC_INTEGRITY;
+ ghdr->gh_svc = svc;
if (!handle) {
/* fill in a fake one */
ghdr->gh_handle.len = 0;
memcpy(ghdr->gh_handle.data, handle->data, handle->len);
}
+ /* no actual signature for null mode */
+ if (svc == SPTLRPC_SVC_NULL)
+ return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+
/* MIC */
- for (textcnt = 0; textcnt < mic_idx; textcnt++) {
+ mic_idx = msg_last_segidx(msg);
+ max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
+
+ for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
text[textcnt].len = msg->lm_buflens[textcnt];
text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
}
*/
static
__u32 gss_verify_msg(struct lustre_msg *msg,
- struct gss_ctx *mechctx)
+ struct gss_ctx *mechctx,
+ __u32 svc)
{
- rawobj_t text[3];
- rawobj_t mic;
- int textcnt, mic_idx = msg->lm_bufcount - 1;
- __u32 major;
+ rawobj_t text[3], mic;
+ int textcnt, max_textcnt;
+ int mic_idx;
+ __u32 major;
+
+ LASSERT(msg->lm_bufcount >= 2);
+
+ if (svc == SPTLRPC_SVC_NULL)
+ return GSS_S_COMPLETE;
- for (textcnt = 0; textcnt < mic_idx; textcnt++) {
+ mic_idx = msg_last_segidx(msg);
+ max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
+
+ for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
text[textcnt].len = msg->lm_buflens[textcnt];
text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
}
if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
cfs_time_t now;
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ if (!ctx->cc_early_expire)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
now = cfs_time_current_sec();
if (ctx->cc_expire && cfs_time_aftereq(now, ctx->cc_expire))
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
- struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
- unsigned long ctx_expiry;
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ unsigned long ctx_expiry;
if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
CERROR("ctx %p(%u): unable to inquire, expire it now\n",
*/
set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- CWARN("%s ctx %p(%u->%s), will expire at %lu(%lds lifetime)\n",
- (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE ?
- "server installed reverse" : "client refreshed"),
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- ctx->cc_expire, (long) (ctx->cc_expire - get_seconds()));
+ if (sec_is_reverse(ctx->cc_sec))
+ CDEBUG(D_SEC, "server installed reverse ctx %p, "
+ "will expire at %lu(%lds lifetime)\n",
+ ctx, ctx->cc_expire,
+ ctx->cc_expire - cfs_time_current_sec());
+ else
+ CWARN("client refreshed ctx %p(%u->%s), will expire at "
+ "%lu(%lds lifetime)\n", ctx, ctx->cc_vcred.vc_uid,
+ sec2target_str(ctx->cc_sec), ctx->cc_expire,
+ ctx->cc_expire - cfs_time_current_sec());
+
+ /*
+ * install reverse svc ctx, but only for forward connection
+ * and root context
+ */
+ if (!sec_is_reverse(ctx->cc_sec) && ctx->cc_vcred.vc_uid == 0) {
+ gss_sec_install_rctx(ctx->cc_sec->ps_import,
+ ctx->cc_sec, ctx);
+ }
}
static
return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
}
-static
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
buf[0] = '\0';
+ if (flags & PTLRPC_CTX_NEW)
+ strncat(buf, "new,", bufsize);
if (flags & PTLRPC_CTX_UPTODATE)
strncat(buf, "uptodate,", bufsize);
if (flags & PTLRPC_CTX_DEAD)
buf[strlen(buf) - 1] = '\0';
}
-int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
-{
- struct gss_cli_ctx *gctx;
- char flags_str[40];
- int written;
-
- gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
-
- gss_cli_ctx_flags2str(ctx->cc_flags, flags_str, sizeof(flags_str));
-
- written = snprintf(buf, bufsize,
- "UID %d:\n"
- " flags: %s\n"
- " seqwin: %d\n"
- " sequence: %d\n",
- ctx->cc_vcred.vc_uid,
- flags_str,
- gctx->gc_win,
- atomic_read(&gctx->gc_seq));
-
- if (gctx->gc_mechctx) {
- written += lgss_display(gctx->gc_mechctx,
- buf + written, bufsize - written);
- }
-
- return written;
-}
-
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req)
{
struct gss_cli_ctx *gctx;
- __u32 seq;
+ __u32 seq, svc;
int rc;
ENTRY;
LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
LASSERT(req->rq_cli_ctx == ctx);
/* nothing to do for context negotiation RPCs */
RETURN(0);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);
redo:
seq = atomic_inc_return(&gctx->gc_seq);
rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
- gctx->gc_proc, seq, &gctx->gc_handle);
+ gctx->gc_proc, seq, svc,
+ &gctx->gc_handle);
if (rc < 0)
RETURN(rc);
* of them we should repack this rpc, because sent it too late might
* lead to the sequence number fall behind the window on server and
* be dropped. also applies to gss_cli_ctx_seal().
+ *
+ * Note: null mode doesn't check sequence number.
*/
- if (atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ if (svc != SPTLRPC_SVC_NULL &&
+ atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
int behind = atomic_read(&gctx->gc_seq) - seq;
gss_stat_oos_record_cli(behind);
CWARN("server respond error (%08x/%08x) for ctx fini\n",
errhdr->gh_major, errhdr->gh_minor);
rc = -EINVAL;
- } else if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
+ } else if (sec_is_reverse(ctx->cc_sec)) {
CWARN("reverse server respond error (%08x/%08x)\n",
errhdr->gh_major, errhdr->gh_minor);
rc = -EINVAL;
RETURN(0);
}
- if (msg->lm_bufcount < 3 || msg->lm_bufcount > 4) {
+ if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
RETURN(-EPROTO);
}
RETURN(-EPROTO);
}
- if (ghdr->gh_svc != PTLRPC_GSS_SVC_INTEGRITY) {
- CERROR("unexpected svc %d\n", ghdr->gh_svc);
+ if (ghdr->gh_svc != reqhdr->gh_svc) {
+ CERROR("svc %u mismatch, expect %u\n",
+ ghdr->gh_svc, reqhdr->gh_svc);
RETURN(-EPROTO);
}
if (lustre_msg_swabbed(msg))
gss_header_swabber(ghdr);
- major = gss_verify_msg(msg, gctx->gc_mechctx);
+ major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
if (major != GSS_S_COMPLETE)
RETURN(-EPERM);
ghdr->gh_flags = 0;
ghdr->gh_proc = gctx->gc_proc;
ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
- ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
ghdr->gh_handle.len = gctx->gc_handle.len;
memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
sec->ps_import = class_import_get(imp);
sec->ps_lock = SPIN_LOCK_UNLOCKED;
atomic_set(&sec->ps_busy, 0);
- INIT_LIST_HEAD(&sec->ps_gc_list);
+ CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!ctx) {
sec->ps_gc_interval = GSS_GC_INTERVAL;
sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
} else {
- LASSERT(sec->ps_flags & PTLRPC_SEC_FL_REVERSE);
+ LASSERT(sec_is_reverse(sec));
/* never do gc on reverse sec */
sec->ps_gc_interval = 0;
flags & PTLRPC_SEC_FL_BULK)
sptlrpc_enc_pool_add_user();
- CWARN("create %s%s@%p\n", (ctx ? "reverse " : ""),
- policy->sp_name, gsec);
+ CDEBUG(D_SEC, "create %s%s@%p\n", (ctx ? "reverse " : ""),
+ policy->sp_name, gsec);
return 0;
}
gctx->gc_win = 0;
atomic_set(&gctx->gc_seq, 0);
- INIT_HLIST_NODE(&ctx->cc_hash);
+ CFS_INIT_HLIST_NODE(&ctx->cc_cache);
atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
spin_lock_init(&ctx->cc_lock);
- INIT_LIST_HEAD(&ctx->cc_req_list);
+ CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
+ CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec */
atomic_inc(&sec->ps_busy);
- CWARN("%s@%p: create ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
return 0;
}
gss_cli_ctx_finalize(gctx);
}
- CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ if (sec_is_reverse(sec))
+ CDEBUG(D_SEC, "reverse sec %p: destroy ctx %p\n",
+ ctx->cc_sec, ctx);
+ else
+ CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
if (atomic_dec_and_test(&sec->ps_busy)) {
LASSERT(atomic_read(&sec->ps_refcount) == 0);
}
static
-int gss_alloc_reqbuf_auth(struct ptlrpc_sec *sec,
+int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
struct ptlrpc_request *req,
- int msgsize)
+ int svc, int msgsize)
{
struct sec_flavor_config *conf;
- int bufsize, txtsize;
- int buflens[5], bufcnt = 2;
+ int bufsize, txtsize;
+ int buflens[5], bufcnt = 2;
ENTRY;
/*
+ * on-wire data layout:
* - gss header
* - lustre message
- * - user descriptor
- * - bulk sec descriptor
- * - signature
+ * - user descriptor (optional)
+ * - bulk sec descriptor (optional)
+ * - signature (optional)
+ * - svc == NULL: NULL
+ * - svc == AUTH: signature of gss header
+ * - svc == INTG: signature of all above
+ *
+ * if this is context negotiation, reserve fixed space
+ * at the last (signature) segment regardless of svc mode.
*/
+
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ txtsize = buflens[0];
+
buflens[1] = msgsize;
- txtsize = buflens[0] + buflens[1];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
buflens[bufcnt] = sptlrpc_current_user_desc_size();
- txtsize += buflens[bufcnt];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
bufcnt++;
}
conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
req->rq_bulk_read);
- txtsize += buflens[bufcnt];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
bufcnt++;
}
- buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
- gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ if (req->rq_ctx_init)
+ buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
+ else if (svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
bufsize = lustre_msg_size_v2(bufcnt, buflens);
int msgsize)
{
struct sec_flavor_config *conf;
- int ibuflens[3], ibufcnt;
- int buflens[3];
- int clearsize, wiresize;
+ int ibuflens[3], ibufcnt;
+ int buflens[3];
+ int clearsize, wiresize;
ENTRY;
LASSERT(req->rq_clrbuf == NULL);
/* Inner (clear) buffers
* - lustre message
- * - user descriptor
- * - bulk checksum
+ * - user descriptor (optional)
+ * - bulk checksum (optional)
*/
+
ibufcnt = 1;
ibuflens[0] = msgsize;
* - signature of gss header
* - cipher text
*/
+
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
struct ptlrpc_request *req,
int msgsize)
{
+ int svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);
+
LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
(req->rq_bulk_read || req->rq_bulk_write));
- switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
- case SPTLRPC_SVC_NONE:
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
- return gss_alloc_reqbuf_auth(sec, req, msgsize);
+ case SPTLRPC_SVC_INTG:
+ return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
case SPTLRPC_SVC_PRIV:
return gss_alloc_reqbuf_priv(sec, req, msgsize);
default:
- LBUG();
+ LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
+ return 0;
}
- return 0;
}
void gss_free_reqbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- int privacy;
+ int privacy;
ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
EXIT;
}
-int gss_alloc_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
+static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
+{
+ bufsize = size_roundup_power2(bufsize);
+
+ OBD_ALLOC(req->rq_repbuf, bufsize);
+ if (!req->rq_repbuf)
+ return -ENOMEM;
+
+ req->rq_repbuf_len = bufsize;
+ return 0;
+}
+
+static
+int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int svc, int msgsize)
{
struct sec_flavor_config *conf;
- int privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV);
- int bufsize, txtsize;
- int buflens[4], bufcnt;
- ENTRY;
+ int txtsize;
+ int buflens[4], bufcnt = 2;
- LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
- (req->rq_bulk_read || req->rq_bulk_write));
+ /*
+ * on-wire data layout:
+ * - gss header
+ * - lustre message
+ * - bulk sec descriptor (optional)
+ * - signature (optional)
+ * - svc == NULL: NULL
+ * - svc == AUTH: signature of gss header
+ * - svc == INTG: signature of all above
+ *
+ * if this is context negotiation, reserve fixed space
+ * at the last (signature) segment regardless of svc mode.
+ */
- if (privacy) {
- bufcnt = 1;
- buflens[0] = msgsize;
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
- buflens[bufcnt++] = bulk_sec_desc_size(
- conf->sfc_bulk_csum, 0,
- req->rq_bulk_read);
- }
- txtsize = lustre_msg_size_v2(bufcnt, buflens);
- txtsize += GSS_MAX_CIPHER_BLOCK;
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ txtsize = buflens[0];
- bufcnt = 3;
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
- buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
- } else {
- bufcnt = 2;
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = msgsize;
- txtsize = buflens[0] + buflens[1];
+ buflens[1] = msgsize;
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
- buflens[bufcnt] = bulk_sec_desc_size(
- conf->sfc_bulk_csum, 0,
- req->rq_bulk_read);
+ if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
+ conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
+ buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 0,
+ req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
- bufcnt++;
- }
- buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
- gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ bufcnt++;
}
- bufsize = lustre_msg_size_v2(bufcnt, buflens);
- bufsize = size_roundup_power2(bufsize);
+ if (req->rq_ctx_init)
+ buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
+ else if (svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
- OBD_ALLOC(req->rq_repbuf, bufsize);
- if (!req->rq_repbuf)
- return -ENOMEM;
+ return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
+}
- req->rq_repbuf_len = bufsize;
- return 0;
+static
+int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ struct sec_flavor_config *conf;
+ int txtsize;
+ int buflens[3], bufcnt;
+
+ /* Inner (clear) buffers
+ * - lustre message
+ * - bulk checksum (optional)
+ */
+
+ bufcnt = 1;
+ buflens[0] = msgsize;
+
+ if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
+ conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
+ buflens[bufcnt++] = bulk_sec_desc_size(
+ conf->sfc_bulk_csum, 0,
+ req->rq_bulk_read);
+ }
+ txtsize = lustre_msg_size_v2(bufcnt, buflens);
+ txtsize += GSS_MAX_CIPHER_BLOCK;
+
+ /* Wrapper (wire) buffers
+ * - gss header
+ * - signature of gss header
+ * - cipher text
+ */
+
+ bufcnt = 3;
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
+ buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
+
+ return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
+}
+
+int gss_alloc_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ int svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);
+ ENTRY;
+
+ LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
+ (req->rq_bulk_read || req->rq_bulk_write));
+
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
+ case SPTLRPC_SVC_PRIV:
+ return gss_alloc_repbuf_priv(sec, req, msgsize);
+ default:
+ LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
+ return 0;
+ }
}
void gss_free_repbuf(struct ptlrpc_sec *sec,
return newmsg_size;
}
-static inline int msg_last_seglen(struct lustre_msg *msg)
-{
- return msg->lm_buflens[msg->lm_bufcount - 1];
-}
-
static
-int gss_enlarge_reqbuf_auth(struct ptlrpc_sec *sec,
+int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
struct ptlrpc_request *req,
+ int svc,
int segment, int newsize)
{
struct lustre_msg *newbuf;
- int txtsize, sigsize, i;
+ int txtsize, sigsize = 0, i;
int newmsg_size, newbuf_size;
/*
- * embedded msg is at seg 1; signature is at the last seg
+ * gss header is at seg 0;
+ * embedded msg is at seg 1;
+ * signature (if any) is at the last seg
*/
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);
- /* compute new embedded msg size */
+ /* 1. compute new embedded msg size */
newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);
- /* compute new wrapper msg size */
- for (txtsize = 0, i = 0; i < req->rq_reqbuf->lm_bufcount; i++)
- txtsize += req->rq_reqbuf->lm_buflens[i];
- txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
+ /* 2. compute new wrapper msg size */
+ if (svc == SPTLRPC_SVC_NULL) {
+ /* no signature, get size directly */
+ newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
+ 1, newmsg_size);
+ } else {
+ txtsize = req->rq_reqbuf->lm_buflens[0];
+
+ if (svc == SPTLRPC_SVC_INTG) {
+ for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
+ txtsize += req->rq_reqbuf->lm_buflens[i];
+ txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
+ }
+
+ sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
- sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
- LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
- newbuf_size = get_enlarged_msgsize2(req->rq_reqbuf, 1, newmsg_size,
- req->rq_reqbuf->lm_bufcount - 1,
- sigsize);
+ newbuf_size = get_enlarged_msgsize2(
+ req->rq_reqbuf,
+ 1, newmsg_size,
+ msg_last_segidx(req->rq_reqbuf),
+ sigsize);
+ }
/* request from pool should always have enough buffer */
LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
}
- _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
- req->rq_reqbuf->lm_bufcount - 1, sigsize);
+ /* do enlargement, from wrapper to embedded, from end to begin */
+ if (svc != SPTLRPC_SVC_NULL)
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
+ msg_last_segidx(req->rq_reqbuf),
+ sigsize);
+
_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
struct ptlrpc_request *req,
int segment, int newsize)
{
+ int svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);
+
LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
- switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
- return gss_enlarge_reqbuf_auth(sec, req, segment, newsize);
+ case SPTLRPC_SVC_INTG:
+ return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
case SPTLRPC_SVC_PRIV:
return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
default:
static
int gss_svc_sign(struct ptlrpc_request *req,
struct ptlrpc_reply_state *rs,
- struct gss_svc_reqctx *grctx)
+ struct gss_svc_reqctx *grctx,
+ int svc)
{
int rc;
ENTRY;
rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
PTLRPC_GSS_PROC_DATA, grctx->src_wirectx.gw_seq,
- NULL);
+ svc, NULL);
if (rc < 0)
RETURN(rc);
CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
libcfs_nid2str(req->rq_peer.nid));
+ req->rq_ctx_init = 1;
+
if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
CERROR("proc %u: invalid handle length %u\n",
gw->gw_proc, gw->gw_handle.len);
if (rc != SECSVC_OK)
RETURN(rc);
+ if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_root)
+ CWARN("user from %s authenticated as %s\n",
+ libcfs_nid2str(req->rq_peer.nid),
+ grctx->src_ctx->gsc_usr_mds ? "mds" : "root");
+ else
+ CWARN("accept user %u from %s\n", grctx->src_ctx->gsc_uid,
+ libcfs_nid2str(req->rq_peer.nid));
+
if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
if (reqbuf->lm_bufcount < 4) {
CERROR("missing user descriptor\n");
*/
static
int gss_svc_verify_request(struct ptlrpc_request *req,
- struct gss_svc_ctx *gctx,
+ struct gss_svc_reqctx *grctx,
struct gss_wire_ctx *gw,
__u32 *major)
{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int offset = 2;
ENTRY;
*major = GSS_S_COMPLETE;
- if (msg->lm_bufcount < 3) {
+ if (msg->lm_bufcount < 2) {
CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
RETURN(-EINVAL);
}
+ if (gw->gw_svc == SPTLRPC_SVC_NULL)
+ goto verified;
+
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
RETURN(-EACCES);
}
- *major = gss_verify_msg(msg, gctx->gsc_mechctx);
+ *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
if (*major != GSS_S_COMPLETE)
RETURN(-EACCES);
RETURN(-EACCES);
}
+verified:
/* user descriptor */
if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
- if (msg->lm_bufcount < (offset + 1 + 1)) {
+ if (msg->lm_bufcount < (offset + 1)) {
CERROR("no user desc included\n");
RETURN(-EINVAL);
}
/* check bulk cksum data */
if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- if (msg->lm_bufcount < (offset + 1 + 1)) {
+ if (msg->lm_bufcount < (offset + 1)) {
CERROR("no bulk checksum included\n");
RETURN(-EINVAL);
}
if (bulk_sec_desc_unpack(msg, offset))
RETURN(-EINVAL);
+
+ grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
+ grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
}
req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
static
int gss_svc_unseal_request(struct ptlrpc_request *req,
- struct gss_svc_ctx *gctx,
+ struct gss_svc_reqctx *grctx,
struct gss_wire_ctx *gw,
__u32 *major)
{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int msglen, offset = 1;
ENTRY;
if (bulk_sec_desc_unpack(msg, offset))
RETURN(-EINVAL);
+
+ grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
+ grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
}
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
}
switch (gw->gw_svc) {
- case PTLRPC_GSS_SVC_INTEGRITY:
- rc = gss_svc_verify_request(req, grctx->src_ctx, gw, &major);
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ rc = gss_svc_verify_request(req, grctx, gw, &major);
break;
- case PTLRPC_GSS_SVC_PRIVACY:
- rc = gss_svc_unseal_request(req, grctx->src_ctx, gw, &major);
+ case SPTLRPC_SVC_PRIV:
+ rc = gss_svc_unseal_request(req, grctx, gw, &major);
break;
default:
CERROR("unsupported gss service %d\n", gw->gw_svc);
struct gss_wire_ctx *gw)
{
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- int replen = sizeof(struct ptlrpc_body);
__u32 major;
ENTRY;
+ req->rq_ctx_fini = 1;
+ req->rq_no_reply = 1;
+
grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
if (!grctx->src_ctx) {
CWARN("invalid gss context handle for destroy.\n");
RETURN(SECSVC_DROP);
}
- if (gw->gw_svc != PTLRPC_GSS_SVC_INTEGRITY) {
+ if (gw->gw_svc != SPTLRPC_SVC_INTG) {
CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
RETURN(SECSVC_DROP);
}
- if (gss_svc_verify_request(req, grctx->src_ctx, gw, &major))
- RETURN(SECSVC_DROP);
-
- if (lustre_pack_reply_v2(req, 1, &replen, NULL))
+ if (gss_svc_verify_request(req, grctx, gw, &major))
RETURN(SECSVC_DROP);
- CWARN("gss svc destroy ctx %p(%u->%s)\n", grctx->src_ctx,
- grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+ CWARN("destroy svc ctx %p(%u->%s)\n", grctx->src_ctx,
+ grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
gss_svc_upcall_destroy_ctx(grctx->src_ctx);
{
struct gss_svc_reqctx *grctx;
struct ptlrpc_reply_state *rs;
- struct ptlrpc_bulk_sec_desc *bsd;
- int privacy;
+ int privacy, svc, bsd_off = 0;
int ibuflens[2], ibufcnt = 0;
int buflens[4], bufcnt;
int txtsize, wmsg_size, rs_size;
RETURN(-EPROTO);
}
+ svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);
+
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
if (gss_svc_reqctx_is_special(grctx))
privacy = 0;
else
- privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) ==
- SPTLRPC_SVC_PRIV);
+ privacy = (svc == SPTLRPC_SVC_PRIV);
if (privacy) {
/* Inner buffer */
ibuflens[0] = msglen;
if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
- bsd = lustre_msg_buf(req->rq_reqbuf,
- req->rq_reqbuf->lm_bufcount - 1,
- sizeof(*bsd));
+ LASSERT(grctx->src_reqbsd);
+ bsd_off = ibufcnt;
ibuflens[ibufcnt++] = bulk_sec_desc_size(
- bsd->bsd_csum_alg, 0,
- req->rq_bulk_read);
+ grctx->src_reqbsd->bsd_csum_alg,
+ 0, req->rq_bulk_read);
}
txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
bufcnt = 2;
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
buflens[1] = msglen;
- txtsize = buflens[0] + buflens[1];
+
+ txtsize = buflens[0];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
- bsd = lustre_msg_buf(req->rq_reqbuf,
- req->rq_reqbuf->lm_bufcount - 2,
- sizeof(*bsd));
+ LASSERT(grctx->src_reqbsd);
+ bsd_off = bufcnt;
buflens[bufcnt] = bulk_sec_desc_size(
- bsd->bsd_csum_alg, 0,
- req->rq_bulk_read);
- txtsize += buflens[bufcnt];
+ grctx->src_reqbsd->bsd_csum_alg,
+ 0, req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
bufcnt++;
}
- buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
+
+ if (gss_svc_reqctx_is_special(grctx) ||
+ svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
}
wmsg_size = lustre_msg_size_v2(bufcnt, buflens);
rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
rs->rs_repbuf_len = wmsg_size;
+ /* initialize the buffer */
if (privacy) {
lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;
- rs->rs_msg = (struct lustre_msg *)
- lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ }
+
+ if (bsd_off) {
+ grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
+ grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
+ bsd_off);
}
gss_svc_reqctx_addref(grctx);
}
LASSERT(cipher_obj.len <= cipher_buflen);
+ /*
+ * we are about to overwrite data at rs->rs_repbuf; nullify the
+ * pointers into it to catch any further illegal usage.
+ */
+ grctx->src_repbsd = NULL;
+ grctx->src_repbsd_size = 0;
+
/* now the real wire data */
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
ghdr->gh_flags = 0;
ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
ghdr->gh_seq = grctx->src_wirectx.gw_seq;
- ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
ghdr->gh_handle.len = 0;
/* header signature */
LASSERT(grctx->src_ctx);
switch (gw->gw_svc) {
- case PTLRPC_GSS_SVC_INTEGRITY:
- rc = gss_svc_sign(req, rs, grctx);
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
break;
- case PTLRPC_GSS_SVC_PRIVACY:
+ case SPTLRPC_SVC_PRIV:
rc = gss_svc_seal(req, rs, grctx);
break;
default:
LASSERT(rs->rs_svc_ctx);
grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);
+ /* paranoid, maybe not necessary */
+ grctx->src_reqbsd = NULL;
+ grctx->src_repbsd = NULL;
+
gss_svc_reqctx_decref(grctx);
rs->rs_svc_ctx = NULL;
if (rc)
GOTO(out, rc);
- rc = sptlrpc_cli_install_rvs_ctx(imp, request->rq_cli_ctx);
- if (rc)
- GOTO(out, rc);
-
LASSERT(imp->imp_conn_current);
msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
void sptlrpc_lproc_fini(void);
/* sec_gc.c */
-void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
-void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
int sptlrpc_gc_start_thread(void);
void sptlrpc_gc_stop_thread(void);
return SPTLRPC_FLVR_NULL;
if (!strcmp(name, "plain"))
return SPTLRPC_FLVR_PLAIN;
- if (!strcmp(name, "krb5"))
- return SPTLRPC_FLVR_KRB5;
+ if (!strcmp(name, "krb5n"))
+ return SPTLRPC_FLVR_KRB5N;
if (!strcmp(name, "krb5i"))
return SPTLRPC_FLVR_KRB5I;
if (!strcmp(name, "krb5p"))
return "null";
case SPTLRPC_FLVR_PLAIN:
return "plain";
- case SPTLRPC_FLVR_KRB5:
- return "krb5";
+ case SPTLRPC_FLVR_KRB5N:
+ return "krb5n";
+ case SPTLRPC_FLVR_KRB5A:
+ return "krb5a";
case SPTLRPC_FLVR_KRB5I:
return "krb5i";
case SPTLRPC_FLVR_KRB5P:
return "krb5p";
default:
CERROR("invalid flavor 0x%x(p%u,s%u,v%u)\n", flavor,
- SEC_FLAVOR_POLICY(flavor), SEC_FLAVOR_SUBPOLICY(flavor),
+ SEC_FLAVOR_POLICY(flavor), SEC_FLAVOR_MECH(flavor),
SEC_FLAVOR_SVC(flavor));
}
return "UNKNOWN";
RETURN(0);
}
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req)
+/*
+ * if @sync == 0, this function should return quickly without sleep;
+ * otherwise it might trigger a ctx-destroying rpc to the server.
+ */
+void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
ENTRY;
spin_unlock(&req->rq_cli_ctx->cc_lock);
}
- /* this could be called with spinlock hold, use async mode */
- sptlrpc_cli_ctx_put(req->rq_cli_ctx, 0);
+ sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
req->rq_cli_ctx = NULL;
EXIT;
}
spin_unlock(&ctx->cc_lock);
sptlrpc_cli_ctx_get(ctx);
- sptlrpc_req_put_ctx(req);
+ sptlrpc_req_put_ctx(req, 0);
rc = sptlrpc_req_get_ctx(req);
if (!rc) {
LASSERT(req->rq_cli_ctx);
static
void ctx_refresh_interrupt(void *data)
{
- /* do nothing */
+ struct ptlrpc_request *req = data;
+
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
static
LASSERT(ctx);
- /* skip reverse ctxs */
- if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE)
- RETURN(0);
-
/* skip special ctxs */
if (cli_ctx_is_eternal(ctx) || req->rq_ctx_init || req->rq_ctx_fini)
RETURN(0);
LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
again:
+ LASSERT(ctx->cc_ops->validate);
+ if (ctx->cc_ops->validate(ctx) == 0) {
+ req_off_ctx_list(req, ctx);
+ RETURN(0);
+ }
+
if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
req->rq_err = 1;
req_off_ctx_list(req, ctx);
goto again;
}
- LASSERT(ctx->cc_ops->validate);
- if (ctx->cc_ops->validate(ctx) == 0) {
- req_off_ctx_list(req, ctx);
- RETURN(0);
- }
-
/* Now we're sure this context is during upcall, add myself into
* waiting list
*/
req->rq_restart = 0;
spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(timeout == 0 ? LONG_MAX : timeout * HZ,
- ctx_refresh_timeout, ctx_refresh_interrupt, req);
+ lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+ ctx_refresh_interrupt, req);
rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
/* five cases we are here:
req->rq_sec_flavor = req->rq_cli_ctx->cc_sec->ps_flavor;
- /* force SVC_NONE for context initiation rpc, SVC_AUTH for context
+ /* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc
*/
if (unlikely(req->rq_ctx_init)) {
req->rq_sec_flavor = SEC_MAKE_RPC_FLAVOR(
SEC_FLAVOR_POLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SUBPOLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SVC(SPTLRPC_SVC_NONE));
+ SEC_FLAVOR_MECH(req->rq_sec_flavor),
+ SPTLRPC_SVC_NULL);
} else if (unlikely(req->rq_ctx_fini)) {
req->rq_sec_flavor = SEC_MAKE_RPC_FLAVOR(
SEC_FLAVOR_POLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SUBPOLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SVC(SPTLRPC_SVC_AUTH));
+ SEC_FLAVOR_MECH(req->rq_sec_flavor),
+ SPTLRPC_SVC_INTG);
}
conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
spin_lock_init(&req->rq_lock);
atomic_set(&req->rq_refcount, 10000);
- INIT_LIST_HEAD(&req->rq_ctx_chain);
+ CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
init_waitqueue_head(&req->rq_reply_waitq);
req->rq_import = imp;
req->rq_cli_ctx = ctx;
}
switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
- case SPTLRPC_SVC_NONE:
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
LASSERT(ctx->cc_ops->sign);
rc = ctx->cc_ops->sign(ctx, req);
break;
}
switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
- case SPTLRPC_SVC_NONE:
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
LASSERT(ctx->cc_ops->verify);
rc = ctx->cc_ops->verify(ctx, req);
break;
LASSERT(atomic_read(&sec->ps_busy) == 0);
LASSERT(policy->sp_cops->destroy_sec);
- CWARN("%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
+ CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
policy->sp_cops->destroy_sec(sec);
sptlrpc_policy_put(policy);
sec = imp->imp_sec;
policy = sec->ps_policy;
- if (!atomic_dec_and_test(&sec->ps_refcount)) {
- sptlrpc_policy_put(policy);
- goto out;
- }
+ if (atomic_dec_and_test(&sec->ps_refcount)) {
+ sec_cop_flush_ctx_cache(sec, -1, 1, 1);
+ sptlrpc_gc_del_sec(sec);
- sec_cop_flush_ctx_cache(sec, -1, 1, 1);
- sptlrpc_gc_del_sec(sec);
-
- if (atomic_dec_and_test(&sec->ps_busy))
- sec_cop_destroy_sec(sec);
- else {
- CWARN("delay to destroy %s@%p: busy contexts\n",
- policy->sp_name, sec);
+ if (atomic_dec_and_test(&sec->ps_busy))
+ sec_cop_destroy_sec(sec);
+ else {
+ CWARN("delay destroying busy sec %s %p\n",
+ policy->sp_name, sec);
+ }
+ } else {
+ sptlrpc_policy_put(policy);
}
-out:
imp->imp_sec = NULL;
}
if (imp == NULL || imp->imp_sec == NULL)
return;
- sec_cop_flush_ctx_cache(imp->imp_sec, -1, 0, 1);
+ sec_cop_flush_ctx_cache(imp->imp_sec, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
* server side security *
****************************************/
+int sptlrpc_target_export_check(struct obd_export *exp,
+ struct ptlrpc_request *req)
+{
+ if (!req->rq_auth_gss ||
+ (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt))
+ return 0;
+
+ if (!req->rq_ctx_init)
+ return 0;
+
+ LASSERT(exp->exp_imp_reverse);
+ sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse, req->rq_svc_ctx);
+ return 0;
+}
+
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
struct ptlrpc_sec_policy *policy;
switch (rpc_flavor) {
case SPTLRPC_FLVR_NULL:
case SPTLRPC_FLVR_PLAIN:
+ case SPTLRPC_FLVR_KRB5N:
+ case SPTLRPC_FLVR_KRB5A:
break;
case SPTLRPC_FLVR_KRB5P:
conf->sfc_bulk_priv = BULK_PRIV_ALG_ARC4;
case SPTLRPC_FLVR_PLAIN:
conf->sfc_bulk_csum = BULK_CSUM_ALG_MD5;
break;
+ case SPTLRPC_FLVR_KRB5N:
+ case SPTLRPC_FLVR_KRB5A:
case SPTLRPC_FLVR_KRB5I:
case SPTLRPC_FLVR_KRB5P:
conf->sfc_bulk_csum = BULK_CSUM_ALG_SHA1;
static __u32 __flavors[] = {
SPTLRPC_FLVR_NULL,
SPTLRPC_FLVR_PLAIN,
+ SPTLRPC_FLVR_KRB5N,
+ SPTLRPC_FLVR_KRB5A,
SPTLRPC_FLVR_KRB5I,
SPTLRPC_FLVR_KRB5P,
};
{
if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
return "*";
- if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE)
+ if (sec_is_reverse(sec))
return "c";
return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
}
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
EXPORT_SYMBOL(bulk_csum_cli_reply);
int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
- struct lustre_msg *vmsg, int voff,
- struct lustre_msg *rmsg, int roff)
+ struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
+ struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
{
- struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
- int vsize, rsize, rc;
-
- vsize = vmsg->lm_buflens[voff];
- rsize = rmsg->lm_buflens[roff];
- bsdv = lustre_msg_buf(vmsg, voff, 0);
- bsdr = lustre_msg_buf(rmsg, roff, 0);
+ int rc;
LASSERT(vsize >= sizeof(*bsdv));
LASSERT(rsize >= sizeof(*bsdr));
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2007 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
#ifdef __KERNEL__
static DECLARE_MUTEX(sec_gc_mutex);
-static LIST_HEAD(sec_gc_list);
+static CFS_LIST_HEAD(sec_gc_list);
static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;
+static CFS_LIST_HEAD(sec_gc_ctx_list);
+static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;
+
static struct ptlrpc_thread sec_gc_thread;
static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
+
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
- CWARN("add sec %p(%s)\n", sec, sec->ps_policy->sp_name);
if (!list_empty(&sec->ps_gc_list)) {
CERROR("sec %p(%s) already in gc list\n",
sec, sec->ps_policy->sp_name);
spin_lock(&sec_gc_list_lock);
list_add_tail(&sec_gc_list, &sec->ps_gc_list);
spin_unlock(&sec_gc_list_lock);
+
+ CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
+EXPORT_SYMBOL(sptlrpc_gc_add_sec);
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
- CWARN("del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
if (list_empty(&sec->ps_gc_list))
return;
mutex_down(&sec_gc_mutex);
mutex_up(&sec_gc_mutex);
atomic_dec(&sec_gc_wait_del);
+
+ CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+}
+EXPORT_SYMBOL(sptlrpc_gc_del_sec);
+
+void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(list_empty(&ctx->cc_gc_chain));
+
+ CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ spin_lock(&sec_gc_ctx_list_lock);
+ list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+ spin_unlock(&sec_gc_ctx_list_lock);
+
+ sec_gc_thread.t_flags |= SVC_SIGNAL;
+ cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+}
+EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
+
+static void sec_process_ctx_list(void)
+{
+ struct ptlrpc_cli_ctx *ctx;
+
+again:
+ spin_lock(&sec_gc_ctx_list_lock);
+ if (!list_empty(&sec_gc_ctx_list)) {
+ ctx = list_entry(sec_gc_ctx_list.next,
+ struct ptlrpc_cli_ctx, cc_gc_chain);
+ list_del_init(&ctx->cc_gc_chain);
+ spin_unlock(&sec_gc_ctx_list_lock);
+
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 1);
+ CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ sptlrpc_cli_ctx_put(ctx, 1);
+
+ goto again;
+ }
+ spin_unlock(&sec_gc_ctx_list_lock);
}
static void sec_do_gc(struct ptlrpc_sec *sec)
return;
}
- CWARN("check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+ CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+
if (time_after(sec->ps_gc_next, now))
return;
while (1) {
struct ptlrpc_sec *sec, *next;
+ thread->t_flags &= ~SVC_SIGNAL;
+ sec_process_ctx_list();
again:
mutex_down(&sec_gc_mutex);
list_for_each_entry_safe(sec, next, &sec_gc_list, ps_gc_list) {
lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
- thread->t_flags & SVC_STOPPING,
+ thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
&lwi);
if (thread->t_flags & SVC_STOPPING) {
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
written += sec->ps_policy->sp_cops->display(
sec, page + written, count - written);
}
-#if 0
- /*
- * list contexts
- */
- if (sec->ps_policy->sp_policy != SPTLRPC_POLICY_GSS)
- goto out;
-
- written += snprintf(page + written, count - written,
- "GSS contexts ==>\n");
-
- spin_lock(&sec->ps_lock);
- for (i = 0; i < sec->ps_ccache_size; i++) {
- hlist_for_each_entry_safe(ctx, pos, next,
- &sec->ps_ccache[i], cc_hash) {
- if (written >= count)
- break;
- written += sptlrpc_cli_ctx_display(ctx, page + written,
- count - written);
- }
- }
- spin_unlock(&sec->ps_lock);
-#endif
out:
return written;
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2004-2006 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
null_sec.ps_gc_interval = 0;
null_sec.ps_gc_next = 0;
- hlist_add_head(&null_cli_ctx.cc_hash, &__list);
+ hlist_add_head(&null_cli_ctx.cc_cache, &__list);
atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
null_cli_ctx.cc_sec = &null_sec;
null_cli_ctx.cc_ops = &null_ctx_ops;
null_cli_ctx.cc_vcred.vc_uid = 0;
spin_lock_init(&null_cli_ctx.cc_lock);
INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+ INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
int sptlrpc_null_init(void)
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
#include <lustre_sec.h>
static struct ptlrpc_sec_policy plain_policy;
+static struct ptlrpc_ctx_ops plain_ctx_ops;
static struct ptlrpc_sec plain_sec;
static struct ptlrpc_cli_ctx plain_cli_ctx;
static struct ptlrpc_svc_ctx plain_svc_ctx;
+/****************************************
+ * cli_ctx apis *
+ ****************************************/
+
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
req->rq_repbuf->lm_bufcount - 1);
}
-static struct ptlrpc_ctx_ops plain_ctx_ops = {
- .refresh = plain_ctx_refresh,
- .sign = plain_ctx_sign,
- .verify = plain_ctx_verify,
- .wrap_bulk = plain_cli_wrap_bulk,
- .unwrap_bulk = plain_cli_unwrap_bulk,
-};
-
-static struct ptlrpc_svc_ctx plain_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
- .sc_policy = &plain_policy,
-};
+/****************************************
+ * sec apis *
+ ****************************************/
static
struct ptlrpc_sec* plain_create_sec(struct obd_import *imp,
RETURN(0);
}
+/****************************************
+ * service apis *
+ ****************************************/
+
+static struct ptlrpc_svc_ctx plain_svc_ctx = {
+ .sc_refcount = ATOMIC_INIT(1),
+ .sc_policy = &plain_policy,
+};
+
static
int plain_accept(struct ptlrpc_request *req)
{
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ int voff, roff;
LASSERT(rs);
+ voff = req->rq_reqbuf->lm_bufcount - 1;
+ roff = rs->rs_repbuf->lm_bufcount - 1;
+
return bulk_csum_svc(desc, req->rq_bulk_read,
- req->rq_reqbuf, req->rq_reqbuf->lm_bufcount - 1,
- rs->rs_repbuf, rs->rs_repbuf->lm_bufcount - 1);
+ lustre_msg_buf(req->rq_reqbuf, voff, 0),
+ lustre_msg_buflen(req->rq_reqbuf, voff),
+ lustre_msg_buf(rs->rs_repbuf, roff, 0),
+ lustre_msg_buflen(rs->rs_repbuf, roff));
}
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ int voff, roff;
LASSERT(rs);
+ voff = req->rq_reqbuf->lm_bufcount - 1;
+ roff = rs->rs_repbuf->lm_bufcount - 1;
+
return bulk_csum_svc(desc, req->rq_bulk_read,
- req->rq_reqbuf, req->rq_reqbuf->lm_bufcount - 1,
- rs->rs_repbuf, rs->rs_repbuf->lm_bufcount - 1);
+ lustre_msg_buf(req->rq_reqbuf, voff, 0),
+ lustre_msg_buflen(req->rq_reqbuf, voff),
+ lustre_msg_buf(rs->rs_repbuf, roff, 0),
+ lustre_msg_buflen(rs->rs_repbuf, roff));
}
+static struct ptlrpc_ctx_ops plain_ctx_ops = {
+ .refresh = plain_ctx_refresh,
+ .sign = plain_ctx_sign,
+ .verify = plain_ctx_verify,
+ .wrap_bulk = plain_cli_wrap_bulk,
+ .unwrap_bulk = plain_cli_unwrap_bulk,
+};
+
static struct ptlrpc_sec_cops plain_sec_cops = {
.create_sec = plain_create_sec,
.destroy_sec = plain_destroy_sec,
plain_sec.ps_flags = 0;
spin_lock_init(&plain_sec.ps_lock);
atomic_set(&plain_sec.ps_busy, 1); /* for "plain_cli_ctx" */
- INIT_LIST_HEAD(&plain_sec.ps_gc_list);
+ CFS_INIT_LIST_HEAD(&plain_sec.ps_gc_list);
plain_sec.ps_gc_interval = 0;
plain_sec.ps_gc_next = 0;
- hlist_add_head(&plain_cli_ctx.cc_hash, &__list);
+ hlist_add_head(&plain_cli_ctx.cc_cache, &__list);
atomic_set(&plain_cli_ctx.cc_refcount, 1); /* for hash */
plain_cli_ctx.cc_sec = &plain_sec;
plain_cli_ctx.cc_ops = &plain_ctx_ops;
PTLRPC_CTX_UPTODATE;
plain_cli_ctx.cc_vcred.vc_uid = 0;
spin_lock_init(&plain_cli_ctx.cc_lock);
- INIT_LIST_HEAD(&plain_cli_ctx.cc_req_list);
+ CFS_INIT_LIST_HEAD(&plain_cli_ctx.cc_req_list);
+ CFS_INIT_LIST_HEAD(&plain_cli_ctx.cc_gc_chain);
}
int sptlrpc_plain_init(void)
ptlrpc_error(request);
goto put_conn;
}
+
+ rc = sptlrpc_target_export_check(request->rq_export, request);
+ if (unlikely(rc)) {
+ DEBUG_REQ(D_ERROR, request,
+ "DROPPING req with illeagle security flavor");
+ goto put_conn;
+ }
+
ptlrpc_update_export_timer(request->rq_export, timediff/500000);
export = class_export_rpc_get(request->rq_export);
}
reintegrate_clients || return 1
client_df || return 3
+ sleep 2 # give it a little time to fully recover before the next test
}
run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
###################################################
test_26b() { # bug 10140 - evict dead exports by pinger
client_df
zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
+ sleep 1 # wait for connections to be established
MDS_FILE=$LPROC/mdt/${mds1_svc}/num_exports
MDS_NEXP1="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
SAVE_PWD=$PWD
-#
-# check pre-set $SEC
-#
-if [ ! -z $SEC ]; then
- if [ "$SEC" != "krb5i" -a "$SEC" != "krb5p" ]; then
- echo "SEC=$SEC is invalid, this script only run in gss mode (krb5i/krb5p)"
- exit 1
- fi
-fi
-
export SEC=${SEC:-krb5p}
export KRB5_CCACHE_DIR=/tmp
export KRB5_CRED=$KRB5_CCACHE_DIR/krb5cc_$RUNAS_ID
export KRB5_CRED_SAVE=$KRB5_CCACHE_DIR/krb5cc.sanity.save
-echo "Using security flavor $SEC"
+#
+# check pre-set $SEC
+#
+case "x$SEC" in
+ xkrb5*)
+ echo "Using ptlrpc security flavor $SEC"
+ ;;
+ *)
+ echo "SEC=$SEC is invalid, it has to be gss/krb5 flavor"
+ exit 1
+ ;;
+esac
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
# cleanup all cred/ctx and touch
$RUNAS kdestroy
- $RUNAS $LFS flushctx
+ $RUNAS $LFS flushctx || error "can't flush ctx"
$RUNAS touch $MOUNT/f2_2 && error "unexpected success"
# restore and touch
test_5() {
local file1=$MOUNT/f5_1
local file2=$MOUNT/f5_2
- local wait_time=120
+ local wait_time=`expr $TIMEOUT + $TIMEOUT`
# current access should be ok
$RUNAS touch $file1 || error "can't touch $file1"
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-if [ ! -z "$USING_KRB5" ]; then
+if $GSS_KRB5; then
$RUNAS -u $ID1 krb5_login.sh || exit 1
$RUNAS -u $ID2 krb5_login.sh || exit 1
fi
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/local.sh}
-if [ ! -z "$USING_KRB5" ]; then
+if $GSS_KRB5; then
$RUNAS krb5_login.sh || exit 1
$RUNAS -u $(($RUNAS_ID + 1)) krb5_login.sh || exit 1
fi
test_69() {
[ $(grep -c obdfilter $LPROC/devices) -eq 0 ] && \
skip "skipping test for remote OST" && return
- [ ! -z "$USING_KRB5" ] && \
- skip "gss with bulk security will triger oops. re-enable this after b10091 get fixed" && return
+ $GSS && skip "gss with bulk security will triger oops. re-enable this after b10091 get fixed" && return
f="$DIR/$tfile"
touch $f
[ "$UID" != 0 ] && skip "must run as root" && return
[ -z "$(grep acl $LPROC/mdc/*-mdc-*/connect_flags)" ] && skip "must have acl enabled" && return
[ -z "$(which setfacl 2>/dev/null)" ] && skip "could not find setfacl" && return
- [ ! -z "$USING_KRB5" ] && skip "could not run under gss" && return
+ $GSS && skip "could not run under gss" && return
SAVE_UMASK=`umask`
umask 0022
SANITYLOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh).log}
FAIL_ON_ERROR=false
-if [ ! -z "$USING_KRB5" ]; then
+if $GSS_KRB5; then
$RUNAS krb5_login.sh || exit 1
fi
export VERBOSE=false
export GMNALNID=${GMNALNID:-/usr/sbin/gmlndnid}
export CATASTROPHE=${CATASTROPHE:-/proc/sys/lnet/catastrophe}
+export GSS=false
+export GSS_KRB5=false
+export GSS_PIPEFS=false
#export PDSH="pdsh -S -Rssh -w"
# eg, assert_env LUSTRE MDSNODES OSTNODES CLIENTS
case "x$SEC" in
xkrb5*)
echo "Using GSS/krb5 ptlrpc security flavor"
- export USING_KRB5="y"
+ GSS=true
+ GSS_KRB5=true
;;
esac
# must be testing a "make install" or "rpm" installation
# note failed to load ptlrpc_gss is considered not fatal
if [ "$BASE" == "ptlrpc_gss" ]; then
- modprobe $BASE $@ || echo "gss/krb5 is not supported"
+ modprobe $BASE $@ 2>/dev/null || echo "gss/krb5 is not supported"
else
modprobe $BASE $@
fi
# starting on MDT
for num in `seq $MDSCOUNT`; do
do_facet mds$num "$LSVCGSSD -v"
- if [ "x$GSS_PIPEFS" == "xy" ]; then
+ if $GSS_PIPEFS; then
do_facet mds$num "$LGSSD -v"
fi
done
done
# starting on client
# FIXME: is "client" the right facet name?
- if [ "x$GSS_PIPEFS" == "xy" ]; then
+ if $GSS_PIPEFS; then
do_facet client "$LGSSD -v"
fi
#
for num in `seq $MDSCOUNT`; do
check_gss_daemon_facet mds$num lsvcgssd
- if [ "x$GSS_PIPEFS" == "xy" ]; then
+ if $GSS_PIPEFS; then
check_gss_daemon_facet mds$num lgssd
fi
done
for num in `seq $OSTCOUNT`; do
check_gss_daemon_facet ost$num lsvcgssd
done
- if [ "x$GSS_PIPEFS" == "xy" ]; then
+ if $GSS_PIPEFS; then
check_gss_daemon_facet client lgssd
fi
}
OST_MOUNT_OPTS=$OST_MOUNT_OPTS,sec=$SEC
fi
- if [ ! -z $USING_KRB5 ]; then
+ if $GSS; then
start_gss_daemons
fi
}
cleanup_krb5_env() {
- if [ ! -z $USING_KRB5 ]; then
+ if $GSS; then
stop_gss_daemons
# maybe cleanup credential cache?
fi
* log facilities *
****************************************/
-loglevel_t g_log_level = LL_INFO;
+loglevel_t g_log_level = LL_WARN;
static const char *log_prefix[] = {
[LL_ERR] = "ERROR",
#endif
#include "lsupport.h"
+const char * lustre_svc_name[] =
+{
+ [LUSTRE_GSS_SVC_MDS] = "MDS",
+ [LUSTRE_GSS_SVC_OSS] = "OSS",
+};
+
/****************************************
* exclusive startup *
****************************************/
#define LUSTRE_GSS_SVC_MDS 0
#define LUSTRE_GSS_SVC_OSS 1
+extern const char * lustre_svc_name[];
+
struct lgssd_upcall_data {
uint32_t seq;
uint32_t uid;
diff -Nrup nfs-utils-1.0.11/configure.in nfs-utils-1.0.11.lustre/configure.in
--- nfs-utils-1.0.11/configure.in 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/configure.in 2007-05-23 14:35:45.000000000 -0600
++++ nfs-utils-1.0.11.lustre/configure.in 2007-06-29 12:29:20.000000000 -0600
@@ -18,61 +18,14 @@ AC_ARG_WITH(release,
RELEASE=$withval,
RELEASE=1)
diff -Nrup nfs-utils-1.0.11/Makefile.am nfs-utils-1.0.11.lustre/Makefile.am
--- nfs-utils-1.0.11/Makefile.am 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/Makefile.am 2007-05-23 14:35:45.000000000 -0600
++++ nfs-utils-1.0.11.lustre/Makefile.am 2007-06-29 12:29:20.000000000 -0600
@@ -1,6 +1,6 @@
## Process this file with automake to produce Makefile.in
diff -Nrup nfs-utils-1.0.11/utils/gssd/cacheio.c nfs-utils-1.0.11.lustre/utils/gssd/cacheio.c
--- nfs-utils-1.0.11/utils/gssd/cacheio.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/cacheio.c 2007-05-23 14:36:28.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/cacheio.c 2007-06-29 12:32:27.000000000 -0600
@@ -240,7 +240,8 @@ int qword_get(char **bpp, char *dest, in
return -1;
while (*bp == ' ') bp++;
diff -Nrup nfs-utils-1.0.11/utils/gssd/context.c nfs-utils-1.0.11.lustre/utils/gssd/context.c
--- nfs-utils-1.0.11/utils/gssd/context.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/context.c 2007-05-23 14:36:29.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/context.c 2007-06-29 12:32:28.000000000 -0600
@@ -33,11 +33,14 @@
#include <syslog.h>
#include <string.h>
int
diff -Nrup nfs-utils-1.0.11/utils/gssd/context.h nfs-utils-1.0.11.lustre/utils/gssd/context.h
---- nfs-utils-1.0.11/utils/gssd/context.h 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/context.h 2007-05-23 14:36:30.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/context.h 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/context.h 2007-06-29 12:32:29.000000000 -0600
@@ -31,8 +31,6 @@
#ifndef _CONTEXT_H_
#define _CONTEXT_H_
diff -Nrup nfs-utils-1.0.11/utils/gssd/context_heimdal.c nfs-utils-1.0.11.lustre/utils/gssd/context_heimdal.c
--- nfs-utils-1.0.11/utils/gssd/context_heimdal.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/context_heimdal.c 2007-05-23 14:36:30.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/context_heimdal.c 2007-06-29 12:32:29.000000000 -0600
@@ -43,8 +43,13 @@
#ifdef HAVE_COM_ERR_H
#include <com_err.h>
int write_heimdal_keyblock(char **p, char *end, krb5_keyblock *key)
diff -Nrup nfs-utils-1.0.11/utils/gssd/context_lucid.c nfs-utils-1.0.11.lustre/utils/gssd/context_lucid.c
---- nfs-utils-1.0.11/utils/gssd/context_lucid.c 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/context_lucid.c 2007-05-23 14:36:31.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/context_lucid.c 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/context_lucid.c 2007-06-29 12:32:30.000000000 -0600
@@ -41,11 +41,7 @@
#include <syslog.h>
#include <string.h>
1, &return_ctx);
if (maj_stat != GSS_S_COMPLETE) {
diff -Nrup nfs-utils-1.0.11/utils/gssd/context_mit.c nfs-utils-1.0.11.lustre/utils/gssd/context_mit.c
---- nfs-utils-1.0.11/utils/gssd/context_mit.c 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/context_mit.c 2007-05-23 14:36:32.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/context_mit.c 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/context_mit.c 2007-06-29 12:32:30.000000000 -0600
@@ -39,10 +39,14 @@
#include <errno.h>
#include <gssapi/gssapi.h>
/* Only applicable flag for this is initiator */
diff -Nrup nfs-utils-1.0.11/utils/gssd/context_spkm3.c nfs-utils-1.0.11.lustre/utils/gssd/context_spkm3.c
--- nfs-utils-1.0.11/utils/gssd/context_spkm3.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/context_spkm3.c 2007-05-23 14:36:32.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/context_spkm3.c 2007-06-29 12:32:31.000000000 -0600
@@ -33,8 +33,6 @@
#include <syslog.h>
#include <string.h>
#include "err_util.h"
diff -Nrup nfs-utils-1.0.11/utils/gssd/err_util.c nfs-utils-1.0.11.lustre/utils/gssd/err_util.c
--- nfs-utils-1.0.11/utils/gssd/err_util.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/err_util.c 2007-05-23 14:36:33.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/err_util.c 2007-06-29 12:32:31.000000000 -0600
@@ -32,6 +32,8 @@
#include <stdarg.h>
#include <syslog.h>
+
diff -Nrup nfs-utils-1.0.11/utils/gssd/err_util.h nfs-utils-1.0.11.lustre/utils/gssd/err_util.h
--- nfs-utils-1.0.11/utils/gssd/err_util.h 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/err_util.h 2007-05-23 14:36:33.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/err_util.h 2007-06-29 12:32:32.000000000 -0600
@@ -33,5 +33,6 @@
void initerr(char *progname, int verbosity, int fg);
#endif /* _ERR_UTIL_H_ */
diff -Nrup nfs-utils-1.0.11/utils/gssd/gss_clnt_send_err.c nfs-utils-1.0.11.lustre/utils/gssd/gss_clnt_send_err.c
--- nfs-utils-1.0.11/utils/gssd/gss_clnt_send_err.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gss_clnt_send_err.c 2007-05-23 14:35:45.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gss_clnt_send_err.c 2007-06-29 12:29:20.000000000 -0600
@@ -47,6 +47,7 @@
#include "gssd.h"
#include "write_bytes.h"
}
+#endif
diff -Nrup nfs-utils-1.0.11/utils/gssd/gssd.c nfs-utils-1.0.11.lustre/utils/gssd/gssd.c
---- nfs-utils-1.0.11/utils/gssd/gssd.c 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gssd.c 2007-05-23 14:36:34.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/gssd.c 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gssd.c 2007-06-29 12:32:36.000000000 -0600
@@ -38,9 +38,12 @@
#include "config.h"
+ return 0;
}
diff -Nrup nfs-utils-1.0.11/utils/gssd/gssd.h nfs-utils-1.0.11.lustre/utils/gssd/gssd.h
---- nfs-utils-1.0.11/utils/gssd/gssd.h 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gssd.h 2007-05-23 14:36:34.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/gssd.h 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gssd.h 2007-06-29 12:32:37.000000000 -0600
@@ -48,8 +48,13 @@
#define GSSD_DEFAULT_CRED_PREFIX "krb5cc_"
#define GSSD_DEFAULT_MACHINE_CRED_SUFFIX "machine"
#endif /* _RPC_GSSD_H_ */
diff -Nrup nfs-utils-1.0.11/utils/gssd/gssd_main_loop.c nfs-utils-1.0.11.lustre/utils/gssd/gssd_main_loop.c
---- nfs-utils-1.0.11/utils/gssd/gssd_main_loop.c 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gssd_main_loop.c 2007-05-23 14:36:35.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/gssd_main_loop.c 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gssd_main_loop.c 2007-06-29 12:32:38.000000000 -0600
@@ -94,11 +94,13 @@ scan_poll_results(int ret)
};
return;
}
diff -Nrup nfs-utils-1.0.11/utils/gssd/gssd_proc.c nfs-utils-1.0.11.lustre/utils/gssd/gssd_proc.c
---- nfs-utils-1.0.11/utils/gssd/gssd_proc.c 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gssd_proc.c 2007-05-23 14:36:35.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/gssd_proc.c 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gssd_proc.c 2007-06-29 12:32:38.000000000 -0600
@@ -43,7 +43,6 @@
#endif
#include "config.h"
}
diff -Nrup nfs-utils-1.0.11/utils/gssd/gss_util.c nfs-utils-1.0.11.lustre/utils/gssd/gss_util.c
--- nfs-utils-1.0.11/utils/gssd/gss_util.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gss_util.c 2007-05-23 14:36:37.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gss_util.c 2007-06-29 12:32:40.000000000 -0600
@@ -87,9 +87,16 @@
#ifdef HAVE_COM_ERR_H
#include <com_err.h>
+
diff -Nrup nfs-utils-1.0.11/utils/gssd/gss_util.h nfs-utils-1.0.11.lustre/utils/gssd/gss_util.h
--- nfs-utils-1.0.11/utils/gssd/gss_util.h 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/gss_util.h 2007-05-23 14:36:37.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/gss_util.h 2007-06-29 12:32:41.000000000 -0600
@@ -32,14 +32,14 @@
#define _GSS_UTIL_H_
#endif /* _GSS_UTIL_H_ */
diff -Nrup nfs-utils-1.0.11/utils/gssd/krb5_util.c nfs-utils-1.0.11.lustre/utils/gssd/krb5_util.c
---- nfs-utils-1.0.11/utils/gssd/krb5_util.c 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/krb5_util.c 2007-05-23 14:36:38.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/krb5_util.c 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/krb5_util.c 2007-06-29 12:32:42.000000000 -0600
@@ -99,12 +99,15 @@
#include <rpc/rpc.h>
#include <sys/types.h>
}
+#endif
diff -Nrup nfs-utils-1.0.11/utils/gssd/krb5_util.h nfs-utils-1.0.11.lustre/utils/gssd/krb5_util.h
---- nfs-utils-1.0.11/utils/gssd/krb5_util.h 2007-05-23 14:35:21.000000000 -0600
-+++ nfs-utils-1.0.11.lustre/utils/gssd/krb5_util.h 2007-05-23 14:36:39.000000000 -0600
+--- nfs-utils-1.0.11/utils/gssd/krb5_util.h 2007-06-29 12:28:01.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/krb5_util.h 2007-06-29 12:32:42.000000000 -0600
@@ -10,6 +10,8 @@
struct gssd_k5_kt_princ {
struct gssd_k5_kt_princ *next;
#endif /* KRB5_UTIL_H */
diff -Nrup nfs-utils-1.0.11/utils/gssd/lsupport.c nfs-utils-1.0.11.lustre/utils/gssd/lsupport.c
--- nfs-utils-1.0.11/utils/gssd/lsupport.c 1969-12-31 17:00:00.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/lsupport.c 2007-05-23 14:36:40.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/lsupport.c 2007-06-29 12:32:43.000000000 -0600
@@ -0,0 +1,787 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+}
diff -Nrup nfs-utils-1.0.11/utils/gssd/lsupport.h nfs-utils-1.0.11.lustre/utils/gssd/lsupport.h
--- nfs-utils-1.0.11/utils/gssd/lsupport.h 1969-12-31 17:00:00.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/lsupport.h 2007-05-23 14:36:41.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/lsupport.h 2007-06-29 12:32:43.000000000 -0600
@@ -0,0 +1,89 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+#endif /* __LIBCFS_H__ */
diff -Nrup nfs-utils-1.0.11/utils/gssd/Makefile.am nfs-utils-1.0.11.lustre/utils/gssd/Makefile.am
--- nfs-utils-1.0.11/utils/gssd/Makefile.am 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/Makefile.am 2007-05-23 14:35:45.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/Makefile.am 2007-06-29 12:29:20.000000000 -0600
@@ -1,17 +1,11 @@
## Process this file with automake to produce Makefile.in
-
diff -Nrup nfs-utils-1.0.11/utils/gssd/svcgssd.c nfs-utils-1.0.11.lustre/utils/gssd/svcgssd.c
--- nfs-utils-1.0.11/utils/gssd/svcgssd.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd.c 2007-05-23 14:36:41.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd.c 2007-06-29 12:32:44.000000000 -0600
@@ -43,7 +43,6 @@
#include <sys/types.h>
#include <sys/stat.h>
printerr(1, "exiting on signal %d\n", signal);
exit(1);
}
+@@ -155,7 +177,7 @@ sig_hup(int signal)
+ static void
+ usage(char *progname)
+ {
+- fprintf(stderr, "usage: %s [-n] [-f] [-v] [-r] [-i]\n",
++ fprintf(stderr, "usage: %s [-n] [-f] [-v] [-r] [-m] [-o]\n",
+ progname);
+ exit(1);
+ }
@@ -166,9 +188,8 @@ main(int argc, char *argv[])
int get_creds = 1;
int fg = 0;
}
diff -Nrup nfs-utils-1.0.11/utils/gssd/svcgssd.h nfs-utils-1.0.11.lustre/utils/gssd/svcgssd.h
--- nfs-utils-1.0.11/utils/gssd/svcgssd.h 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd.h 2007-05-23 14:36:42.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd.h 2007-06-29 12:32:45.000000000 -0600
@@ -35,9 +35,20 @@
#include <sys/queue.h>
#include <gssapi/gssapi.h>
#endif /* _RPC_SVCGSSD_H_ */
diff -Nrup nfs-utils-1.0.11/utils/gssd/svcgssd_main_loop.c nfs-utils-1.0.11.lustre/utils/gssd/svcgssd_main_loop.c
--- nfs-utils-1.0.11/utils/gssd/svcgssd_main_loop.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd_main_loop.c 2007-05-23 14:36:42.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd_main_loop.c 2007-06-29 12:32:45.000000000 -0600
@@ -46,46 +46,66 @@
#include "svcgssd.h"
#include "err_util.h"
+ struct timespec halfsec = { .tv_sec = 0, .tv_nsec = 500000000 };
-#define NULLRPC_FILE "/proc/net/rpc/auth.rpcsec.init/channel"
-+#define NULLRPC_FILE "/proc/net/rpc/auth.ptlrpcs.init/channel"
++#define NULLRPC_FILE "/proc/net/rpc/auth.sptlrpc.init/channel"
- f = fopen(NULLRPC_FILE, "rw");
-
}
diff -Nrup nfs-utils-1.0.11/utils/gssd/svcgssd_proc.c nfs-utils-1.0.11.lustre/utils/gssd/svcgssd_proc.c
--- nfs-utils-1.0.11/utils/gssd/svcgssd_proc.c 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd_proc.c 2007-05-23 14:36:44.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/gssd/svcgssd_proc.c 2007-06-29 12:32:46.000000000 -0600
@@ -35,7 +35,6 @@
#include <sys/param.h>
extern char * mech2file(gss_OID mech);
-#define SVCGSSD_CONTEXT_CHANNEL "/proc/net/rpc/auth.rpcsec.context/channel"
-#define SVCGSSD_INIT_CHANNEL "/proc/net/rpc/auth.rpcsec.init/channel"
-+#define SVCGSSD_CONTEXT_CHANNEL "/proc/net/rpc/auth.ptlrpcs.context/channel"
-+#define SVCGSSD_INIT_CHANNEL "/proc/net/rpc/auth.ptlrpcs.init/channel"
++#define SVCGSSD_CONTEXT_CHANNEL "/proc/net/rpc/auth.sptlrpc.context/channel"
++#define SVCGSSD_INIT_CHANNEL "/proc/net/rpc/auth.sptlrpc.init/channel"
#define TOKEN_BUF_SIZE 8192
if (ctx != GSS_C_NO_CONTEXT)
diff -Nrup nfs-utils-1.0.11/utils/Makefile.am nfs-utils-1.0.11.lustre/utils/Makefile.am
--- nfs-utils-1.0.11/utils/Makefile.am 2007-02-21 21:50:03.000000000 -0700
-+++ nfs-utils-1.0.11.lustre/utils/Makefile.am 2007-05-23 14:35:45.000000000 -0600
++++ nfs-utils-1.0.11.lustre/utils/Makefile.am 2007-06-29 12:29:20.000000000 -0600
@@ -2,30 +2,6 @@
OPTDIRS =
struct pollfd pollfd;
struct timespec halfsec = { .tv_sec = 0, .tv_nsec = 500000000 };
-#define NULLRPC_FILE "/proc/net/rpc/auth.ptlrpcs.init/channel"
+#define NULLRPC_FILE "/proc/net/rpc/auth.sptlrpc.init/channel"
while (1) {
int save_err;
#include "lsupport.h"
extern char * mech2file(gss_OID mech);
-#define SVCGSSD_CONTEXT_CHANNEL "/proc/net/rpc/auth.ptlrpcs.context/channel"
-#define SVCGSSD_INIT_CHANNEL "/proc/net/rpc/auth.ptlrpcs.init/channel"
+#define SVCGSSD_CONTEXT_CHANNEL "/proc/net/rpc/auth.sptlrpc.context/channel"
+#define SVCGSSD_INIT_CHANNEL "/proc/net/rpc/auth.sptlrpc.init/channel"
#define TOKEN_BUF_SIZE 8192
return -1;
}
memcpy(sname, name.value, name.length);
- printerr(1, "authenticated %s from %016llx\n", sname, nid);
+ printerr(1, "%s: authenticated %s from %016llx\n",
+ lustre_svc_name[lustre_svc], sname, nid);
gss_release_buffer(&min_stat, &name);
if (lustre_svc == LUSTRE_GSS_SVC_MDS)
qword_get(&cp, (char *) &lustre_svc, sizeof(lustre_svc));
qword_get(&cp, (char *) &nid, sizeof(nid));
qword_get(&cp, (char *) &handle_seq, sizeof(handle_seq));
- printerr(1, "handling req: svc %u, nid %016llx, idx %llx\n",
+ printerr(2, "handling req: svc %u, nid %016llx, idx %llx\n",
lustre_svc, nid, handle_seq);
in_handle.length = (size_t) qword_get(&cp, in_handle.value,