*
* Modifications for Lustre
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Author: Eric Mei <ericm@clusterfs.com>
*/
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
- if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
if (!ctx->cc_early_expire)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
* someone else, in which case nobody will make further use
* of it. We don't care; marking it UPTODATE still helps
* destroy the server-side context when this one is destroyed. */
- set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
CWARN("server installed reverse ctx %p idx "LPX64", "
*/
switch (phase) {
case 0:
- if (test_bit(seq_num % win_size, window))
+ if (cfs_test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
{
int rc = 0;
- spin_lock(&ssd->ssd_lock);
+ cfs_spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- spin_unlock(&ssd->ssd_lock);
+ cfs_spin_unlock(&ssd->ssd_lock);
return rc;
}
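/*
 * For context: the replay window checked above is a plain bitmap
 * indexed modulo the window size, where a set bit means that
 * sequence number was already accepted. A condensed sketch of the
 * check follows; it assumes the caller holds the lock protecting
 * 'window' and that seq_num lies within the current window. The
 * function name is illustrative, not part of the Lustre API.
 */
static int replay_window_check(unsigned long *window,
                               unsigned long seq_num,
                               unsigned long win_size)
{
        if (cfs_test_bit(seq_num % win_size, window))
                return -EACCES;                  /* replay detected */
        cfs_set_bit(seq_num % win_size, window); /* record first use */
        return 0;
}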
flags |= LUSTRE_GSS_PACK_USER;
redo:
- seq = atomic_inc_return(&gctx->gc_seq);
+ seq = cfs_atomic_inc_return(&gctx->gc_seq);
rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
ctx->cc_sec->ps_part,
*
* Note: null mode doesn't check the sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
- atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - seq;
+ cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry signing\n", req, behind);
ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
/* buffer objects */
hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
LASSERT(token.len <= buflens[1]);
/* see the explanation in gss_cli_ctx_sign() */
- if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
GSS_SEQ_REPACK_THRESHOLD)) {
- int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+ int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry sealing\n", req, behind);
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
goto redo;
}
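/*
 * Both the sign and seal paths above use the same repack pattern:
 * draw a fresh sequence number, build the message, then check
 * whether concurrent requests advanced the shared counter past
 * GSS_SEQ_REPACK_THRESHOLD; if so, rebuild with a newer sequence
 * so the request still lands inside the server's replay window.
 * Restated as a loop; build_msg() is an illustrative stand-in for
 * gss_sign_msg()/gss_seal_msg(), not a real helper.
 */
static int sign_with_repack(struct ptlrpc_request *req,
                            struct gss_cli_ctx *gctx)
{
        __u32 seq;
        int   rc;

        do {
                seq = cfs_atomic_inc_return(&gctx->gc_seq);
                rc = build_msg(req, seq);       /* illustrative stand-in */
                if (rc)
                        return rc;
                /* if the counter raced ahead while we were packing,
                 * seq may already fall outside the server's window:
                 * redo with a fresh sequence number */
        } while (cfs_atomic_read(&gctx->gc_seq) - seq >
                 GSS_SEQ_REPACK_THRESHOLD);
        return 0;
}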
return -EOPNOTSUPP;
}
- spin_lock_init(&gsec->gs_lock);
+ cfs_spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
/* initialize upper ptlrpc_sec */
sec = &gsec->gs_base;
sec->ps_policy = policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
+ cfs_atomic_set(&sec->ps_refcount, 0);
+ cfs_atomic_set(&sec->ps_nctx, 0);
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
- spin_lock_init(&sec->ps_lock);
+ cfs_spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
ENTRY;
LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
if (gsec->gs_mech) {
lgss_mech_put(gsec->gs_mech);
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
gctx->gc_win = 0;
- atomic_set(&gctx->gc_seq, 0);
+ cfs_atomic_set(&gctx->gc_seq, 0);
CFS_INIT_HLIST_NODE(&ctx->cc_cache);
- atomic_set(&ctx->cc_refcount, 0);
+ cfs_atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
- spin_lock_init(&ctx->cc_lock);
+ cfs_spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec, balanced in ctx destroying */
- atomic_inc(&sec->ps_refcount);
+ cfs_atomic_inc(&sec->ps_refcount);
/* statistic only */
- atomic_inc(&sec->ps_nctx);
+ cfs_atomic_inc(&sec->ps_nctx);
CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
sec->ps_policy->sp_name, ctx->cc_sec,
{
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
LASSERT(ctx->cc_sec == sec);
/*
* asynchronous and is finished by request_out_callback(), so
* we take a reference; whoever finally drops the refcount to
* 0 is responsible for the rest of the destroy. */
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
gss_do_ctx_fini_rpc(gctx);
gss_cli_ctx_finalize(gctx);
- if (!atomic_dec_and_test(&ctx->cc_refcount))
+ if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return 1;
}
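/*
 * The extra reference above is a handoff that pins the context
 * across the asynchronous fini RPC: both this path and
 * request_out_callback() drop a reference, and only the one that
 * brings the count to zero finishes the teardown. The rule in
 * sketch form (same calls as above, annotated):
 *
 *      cfs_atomic_inc(&ctx->cc_refcount);      // pin across the RPC
 *      gss_do_ctx_fini_rpc(gctx);              // may complete later
 *      gss_cli_ctx_finalize(gctx);
 *      if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
 *              return 1;                       // callback finishes destroy
 */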
req->rq_reqbuf_len = 0;
}
- req->rq_reqmsg = NULL;
-
EXIT;
}
OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
-
- req->rq_repmsg = NULL;
+ req->rq_repdata = NULL;
+ req->rq_repdata_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+ cfs_atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
gss_svc_reqctx_free(grctx);
}
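/*
 * These helpers are the usual get/put pair for the request context:
 * any code that stores a pointer to grctx->src_base takes a
 * reference first, and the matching decref frees the context once
 * the last holder drops it. Minimal usage sketch:
 *
 *      gss_svc_reqctx_addref(grctx);   // new holder of the pointer
 *      ...use grctx->src_base...
 *      gss_svc_reqctx_decref(grctx);   // may free grctx here
 */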
RETURN(SECSVC_DROP);
grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- atomic_set(&grctx->src_base.sc_refcount, 1);
+ cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
req->rq_svc_ctx = &grctx->src_base;
gw = &grctx->src_wirectx;
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
* each reverse root ctx will record its latest sequence number on its
* buddy svcctx before being destroyed, so we continue using it here.
*/
- atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+ cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
CERROR("failed to dup svc handle\n");