/* undo what was done by md_intent_lock */
if (it_disposition(it, DISP_OPEN_CREATE) &&
!it_open_error(DISP_OPEN_CREATE, it)) {
- LASSERT(request);
- LASSERT(cfs_atomic_read(&request->rq_refcount) > 1);
- CDEBUG(D_INODE, "dec a ref of req %p\n", request);
- ptlrpc_req_finished(request);
+ LASSERT(request);
+ LASSERT(atomic_read(&request->rq_refcount) > 1);
+ CDEBUG(D_INODE, "dec a ref of req %p\n", request);
+ ptlrpc_req_finished(request);
}
return it_open_error(DISP_OPEN_OPEN, it);
}
struct lwp_device *m = lu2lwp_dev(lu);
ENTRY;
- if (cfs_atomic_read(&lu->ld_ref) && lu->ld_site) {
+ if (atomic_read(&lu->ld_ref) && lu->ld_site) {
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
lu_site_print(env, lu->ld_site, &msgdata, lu_cdebug_printer);
}
ENTRY;
- if (cfs_atomic_read(&lu->ld_ref) && lu->ld_site) {
+ if (atomic_read(&lu->ld_ref) && lu->ld_site) {
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
lu_site_print(env, lu->ld_site, &msgdata, lu_cdebug_printer);
}
/* underlying shared device */
struct dt_device *otr_dev;
/* how many users of this tracker */
- cfs_atomic_t otr_refcount;
+ atomic_t otr_refcount;
};
struct osp_precreate {
LASSERT(d);
CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
- cfs_atomic_read(&req->rq_refcount),
+ atomic_read(&req->rq_refcount),
rc, (unsigned) req->rq_transno);
LASSERT(rc || req->rq_transno);
mutex_lock(&osp_id_tracker_sem);
cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
if (tr->otr_dev == d->opd_storage) {
- LASSERT(cfs_atomic_read(&tr->otr_refcount));
- cfs_atomic_inc(&tr->otr_refcount);
+ LASSERT(atomic_read(&tr->otr_refcount));
+ atomic_inc(&tr->otr_refcount);
d->opd_syn_tracker = tr;
found = tr;
break;
tr->otr_dev = d->opd_storage;
tr->otr_next_id = 1;
tr->otr_committed_id = 0;
- cfs_atomic_set(&tr->otr_refcount, 1);
+ atomic_set(&tr->otr_refcount, 1);
CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
tr->otr_tx_cb.dtc_txn_commit =
osp_sync_remove_from_tracker(d);
mutex_lock(&osp_id_tracker_sem);
- if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
+ if (atomic_dec_and_test(&tr->otr_refcount)) {
dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
cfs_list_del(&tr->otr_list);
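For context, the tracker teardown above pairs with a find-or-create lookup; a minimal sketch of that pattern follows (tracker_get() is a hypothetical name, the other identifiers are taken from the hunks above). The lookup and the reference bump must happen under the same mutex, otherwise a racing put could free the tracker between the two steps.

static struct osp_id_tracker *tracker_get(struct dt_device *dev)
{
	struct osp_id_tracker *tr;

	mutex_lock(&osp_id_tracker_sem);
	cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
		if (tr->otr_dev == dev) {
			atomic_inc(&tr->otr_refcount);
			mutex_unlock(&osp_id_tracker_sem);
			return tr;
		}
	}
	mutex_unlock(&osp_id_tracker_sem);
	return NULL;	/* caller allocates one and retries under the mutex */
}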
init_waitqueue_head(&request->rq_reply_waitq);
init_waitqueue_head(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
+ atomic_set(&request->rq_refcount, 1);
lustre_msg_set_opc(request->rq_reqmsg, opcode);
OBD_ALLOC(set, sizeof *set);
if (!set)
RETURN(NULL);
- cfs_atomic_set(&set->set_refcount, 1);
+ atomic_set(&set->set_refcount, 1);
CFS_INIT_LIST_HEAD(&set->set_requests);
init_waitqueue_head(&set->set_waitq);
- cfs_atomic_set(&set->set_new_count, 0);
- cfs_atomic_set(&set->set_remaining, 0);
+ atomic_set(&set->set_new_count, 0);
+ atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
ENTRY;
/* Requests on the set should either all be completed, or all be new */
- expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ?
+ expected_phase = (atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
cfs_list_for_each (tmp, &set->set_requests) {
struct ptlrpc_request *req =
n++;
}
- LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 ||
- cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n",
- cfs_atomic_read(&set->set_remaining), n);
+ LASSERTF(atomic_read(&set->set_remaining) == 0 ||
+ atomic_read(&set->set_remaining) == n, "%d / %d\n",
+ atomic_read(&set->set_remaining), n);
cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
rq_set_chain);
cfs_list_del_init(&req->rq_set_chain);
- LASSERT(req->rq_phase == expected_phase);
+ LASSERT(req->rq_phase == expected_phase);
- if (req->rq_phase == RQ_PHASE_NEW) {
- ptlrpc_req_interpret(NULL, req, -EBADR);
- cfs_atomic_dec(&set->set_remaining);
- }
+ if (req->rq_phase == RQ_PHASE_NEW) {
+ ptlrpc_req_interpret(NULL, req, -EBADR);
+ atomic_dec(&set->set_remaining);
+ }
spin_lock(&req->rq_lock);
req->rq_set = NULL;
ptlrpc_req_finished (req);
}
- LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
+ LASSERT(atomic_read(&set->set_remaining) == 0);
- ptlrpc_reqset_put(set);
- EXIT;
+ ptlrpc_reqset_put(set);
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);
/* The set takes over the caller's request reference */
cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
- cfs_atomic_inc(&set->set_remaining);
+ atomic_inc(&set->set_remaining);
req->rq_queued_time = cfs_time_current();
if (req->rq_reqmsg != NULL)
req->rq_set = set;
req->rq_queued_time = cfs_time_current();
cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = cfs_atomic_inc_return(&set->set_new_count);
+ count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
/* Only need to call wakeup once for the first entry. */
/* probably doesn't need to be a D_ERROR after initial testing */
DEBUG_REQ(D_ERROR, req, "send limit expired ");
*status = -EIO;
- } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
- imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */ ;
- if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- }
+ } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
+ imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ /* allow CONNECT even if import is invalid */ ;
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ }
} else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
if (!imp->imp_deactive)
DEBUG_REQ(D_NET, req, "IMP_INVALID");
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
		/* invalidate in progress - any requests should be dropped */
- if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
+ if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
} else if (imp->imp_dlm_fake || req->rq_no_delay) {
ptlrpc_import_state_name(imp->imp_state));
LASSERT(cfs_list_empty(&req->rq_list));
cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
+ atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
RETURN(0);
}
LASSERT(cfs_list_empty(&req->rq_list));
cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
+ atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_reqmsg, current_pid());
LASSERT(set->set_producer != NULL);
- remaining = cfs_atomic_read(&set->set_remaining);
+ remaining = atomic_read(&set->set_remaining);
/* populate the ->set_requests list with requests until we
* reach the maximum number of RPCs in flight for this set */
- while (cfs_atomic_read(&set->set_remaining) < set->set_max_inflight) {
+ while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
rc = set->set_producer(set, set->set_producer_arg);
if (rc == -ENOENT) {
/* no more RPC to produce */
}
}
- RETURN((cfs_atomic_read(&set->set_remaining) - remaining));
+ RETURN((atomic_read(&set->set_remaining) - remaining));
}
/**
int force_timer_recalc = 0;
ENTRY;
- if (cfs_atomic_read(&set->set_remaining) == 0)
+ if (atomic_read(&set->set_remaining) == 0)
RETURN(1);
cfs_list_for_each_safe(tmp, next, &set->set_requests) {
* allow sending this rpc and returns *status != 0. */
if (!cfs_list_empty(&req->rq_list)) {
cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
+ atomic_dec(&imp->imp_inflight);
}
spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
+ atomic_dec(&set->set_remaining);
wake_up_all(&imp->imp_recovery_waitq);
if (set->set_producer) {
set->set_rc = req->rq_status;
ptlrpc_req_finished(req);
}
- }
+ }
- /* If we hit an error, we want to recover promptly. */
- RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
+ /* If we hit an error, we want to recover promptly. */
+ RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
EXPORT_SYMBOL(ptlrpc_check_set);
RETURN(1);
}
- cfs_atomic_inc(&imp->imp_timeouts);
+ atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
if (imp->imp_dlm_fake)
* EINTR.
* I don't really care if we go once more round the loop in
* the error cases -eeb. */
- if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) {
+ if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_invalid_rqset = 1;
spin_unlock(&req->rq_lock);
- }
- }
- } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
+ }
+ }
+ } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
- LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
+ LASSERT(atomic_read(&set->set_remaining) == 0);
rc = set->set_rc; /* rq_status of already freed requests if any */
cfs_list_for_each(tmp, &set->set_requests) {
}
LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
- if (cfs_atomic_read(&request->rq_refcount) != 0) {
- DEBUG_REQ(D_ERROR, request,
- "freeing request with nonzero refcount");
- LBUG();
- }
+ if (atomic_read(&request->rq_refcount) != 0) {
+ DEBUG_REQ(D_ERROR, request,
+ "freeing request with nonzero refcount");
+ LBUG();
+ }
if (request->rq_repbuf != NULL)
sptlrpc_cli_free_repbuf(request);
}
DEBUG_REQ(D_INFO, request, "refcount now %u",
- cfs_atomic_read(&request->rq_refcount) - 1);
+ atomic_read(&request->rq_refcount) - 1);
- if (cfs_atomic_dec_and_test(&request->rq_refcount)) {
+ if (atomic_dec_and_test(&request->rq_refcount)) {
__ptlrpc_free_req(request, locked);
RETURN(1);
}
*/
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
- ENTRY;
- cfs_atomic_inc(&req->rq_refcount);
- RETURN(req);
+ ENTRY;
+ atomic_inc(&req->rq_refcount);
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpc_request_addref);
* In case of error restart replay process.
*/
static int ptlrpc_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void * data, int rc)
+ struct ptlrpc_request *req,
+ void * data, int rc)
{
- struct ptlrpc_replay_async_args *aa = data;
- struct obd_import *imp = req->rq_import;
+ struct ptlrpc_replay_async_args *aa = data;
+ struct obd_import *imp = req->rq_import;
- ENTRY;
- cfs_atomic_dec(&imp->imp_replay_inflight);
+ ENTRY;
+ atomic_dec(&imp->imp_replay_inflight);
if (!ptlrpc_client_replied(req)) {
CERROR("request replay timed out, restarting recovery\n");
ptlrpc_at_get_net_latency(req));
DEBUG_REQ(D_HA, req, "REPLAY");
- cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
- ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_replay_req);
conn->c_peer = peer;
conn->c_self = self;
CFS_INIT_HLIST_NODE(&conn->c_hash);
- cfs_atomic_set(&conn->c_refcount, 1);
+ atomic_set(&conn->c_refcount, 1);
if (uuid)
obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
- /*
- * Add the newly created conn to the hash, on key collision we
- * lost a racing addition and must destroy our newly allocated
- * connection. The object which exists in the has will be
- * returned and may be compared against out object.
- */
+	/*
+	 * Add the newly created conn to the hash, on key collision we
+	 * lost a racing addition and must destroy our newly allocated
+	 * connection. The object which exists in the hash will be
+	 * returned and may be compared against our object.
+	 */
/* In the function below, .hs_keycmp resolves to
* conn_keycmp() */
/* coverity[overrun-buffer-val] */
- conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
- if (conn != conn2) {
- OBD_FREE_PTR(conn);
- conn = conn2;
- }
- EXIT;
+ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
+ if (conn != conn2) {
+ OBD_FREE_PTR(conn);
+ conn = conn2;
+ }
+ EXIT;
out:
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, cfs_atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
- return conn;
+ CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+ return conn;
}
EXPORT_SYMBOL(ptlrpc_connection_get);
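The collision handling above is the usual optimistic-insert idiom; isolated here as a hedged sketch (identifiers as in the hunk):

/* Allocate first, then try to insert. If another thread won the race,
 * cfs_hash_findadd_unique() returns the entry already in the hash, so
 * we free our own allocation and adopt the winner's object. */
conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
if (conn != conn2) {
	OBD_FREE_PTR(conn);	/* lost the race; drop our copy */
	conn = conn2;		/* use the cached connection */
}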
int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
- int rc = 0;
- ENTRY;
-
- if (!conn)
- RETURN(rc);
-
- LASSERT(cfs_atomic_read(&conn->c_refcount) > 1);
-
- /*
- * We do not remove connection from hashtable and
- * do not free it even if last caller released ref,
- * as we want to have it cached for the case it is
- * needed again.
- *
- * Deallocating it and later creating new connection
- * again would be wastful. This way we also avoid
- * expensive locking to protect things from get/put
- * race when found cached connection is freed by
- * ptlrpc_connection_put().
- *
- * It will be freed later in module unload time,
- * when ptlrpc_connection_fini()->lh_exit->conn_exit()
- * path is called.
- */
- if (cfs_atomic_dec_return(&conn->c_refcount) == 1)
- rc = 1;
-
- CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
- conn, cfs_atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
-
- RETURN(rc);
+ int rc = 0;
+ ENTRY;
+
+ if (!conn)
+ RETURN(rc);
+
+ LASSERT(atomic_read(&conn->c_refcount) > 1);
+
+ /*
+ * We do not remove connection from hashtable and
+ * do not free it even if last caller released ref,
+ * as we want to have it cached for the case it is
+ * needed again.
+ *
+ * Deallocating it and later creating new connection
+	 * again would be wasteful. This way we also avoid
+ * expensive locking to protect things from get/put
+ * race when found cached connection is freed by
+ * ptlrpc_connection_put().
+ *
+ * It will be freed later in module unload time,
+ * when ptlrpc_connection_fini()->lh_exit->conn_exit()
+ * path is called.
+ */
+ if (atomic_dec_return(&conn->c_refcount) == 1)
+ rc = 1;
+
+ CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+
+ RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_connection_put);
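A usage sketch of the resulting get/put convention (illustrative flow, not taken verbatim from the source): the hash table owns the base reference, so a put never drops the count below 1, and a nonzero return only means the caller held the last external reference.

conn = ptlrpc_connection_get(peer, self, uuid);	/* refcount >= 2: cache + us */
/* ... use conn ... */
if (ptlrpc_connection_put(conn))		/* back to 1: cache only */
	CDEBUG(D_INFO, "dropped the last external ref\n");
/* conn itself is freed only via conn_exit() at module unload */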
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
{
- ENTRY;
+ ENTRY;
- cfs_atomic_inc(&conn->c_refcount);
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, cfs_atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
+ atomic_inc(&conn->c_refcount);
+ CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
- RETURN(conn);
+ RETURN(conn);
}
EXPORT_SYMBOL(ptlrpc_connection_addref);
struct ptlrpc_connection *conn;
conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- cfs_atomic_inc(&conn->c_refcount);
+ atomic_inc(&conn->c_refcount);
}
static void
struct ptlrpc_connection *conn;
conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- cfs_atomic_dec(&conn->c_refcount);
+ atomic_dec(&conn->c_refcount);
}
static void
conn_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
- struct ptlrpc_connection *conn;
-
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- /*
- * Nothing should be left. Connection user put it and
- * connection also was deleted from table by this time
- * so we should have 0 refs.
- */
- LASSERTF(cfs_atomic_read(&conn->c_refcount) == 0,
- "Busy connection with %d refs\n",
- cfs_atomic_read(&conn->c_refcount));
- OBD_FREE_PTR(conn);
+ struct ptlrpc_connection *conn;
+
+ conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ /*
+ * Nothing should be left. Connection user put it and
+ * connection also was deleted from table by this time
+ * so we should have 0 refs.
+ */
+ LASSERTF(atomic_read(&conn->c_refcount) == 0,
+ "Busy connection with %d refs\n",
+ atomic_read(&conn->c_refcount));
+ OBD_FREE_PTR(conn);
}
static cfs_hash_ops_t conn_hash_ops = {
spin_lock_init(&req->rq_lock);
CFS_INIT_LIST_HEAD(&req->rq_timed_list);
CFS_INIT_LIST_HEAD(&req->rq_exp_list);
- cfs_atomic_set(&req->rq_refcount, 1);
- if (ev->type == LNET_EVENT_PUT)
- CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
- req, req->rq_xid, ev->mlength);
+ atomic_set(&req->rq_refcount, 1);
+ if (ev->type == LNET_EVENT_PUT)
+ CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
+ req, req->rq_xid, ev->mlength);
- CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
+ CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
spin_lock(&svcpt->scp_lock);
struct module *gm_owner;
char *gm_name;
rawobj_t gm_oid;
- cfs_atomic_t gm_count;
+ atomic_t gm_count;
struct gss_api_ops *gm_ops;
int gm_sf_num;
struct subflavor_desc *gm_sfs;
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
{
- struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
- struct obd_import *imp = ctx->cc_sec->ps_import;
- struct ptlrpc_request *req;
- struct ptlrpc_user_desc *pud;
- int rc;
- ENTRY;
-
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ struct obd_import *imp = ctx->cc_sec->ps_import;
+ struct ptlrpc_request *req;
+ struct ptlrpc_user_desc *pud;
+ int rc;
+ ENTRY;
+
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
};
struct gss_cli_ctx {
- struct ptlrpc_cli_ctx gc_base;
- __u32 gc_flavor;
- __u32 gc_proc;
- __u32 gc_win;
- cfs_atomic_t gc_seq;
- rawobj_t gc_handle;
- struct gss_ctx *gc_mechctx;
- /* handle for the buddy svc ctx */
- rawobj_t gc_svc_handle;
+ struct ptlrpc_cli_ctx gc_base;
+ __u32 gc_flavor;
+ __u32 gc_proc;
+ __u32 gc_win;
+ atomic_t gc_seq;
+ rawobj_t gc_handle;
+ struct gss_ctx *gc_mechctx;
+ /* handle for the buddy svc ctx */
+ rawobj_t gc_svc_handle;
};
struct gss_cli_ctx_keyring {
return NULL;
}
- ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
+ ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
- cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
+ atomic_inc(&ctx->cc_refcount); /* for the caller */
- return ctx;
+ return ctx;
}
static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
- struct ptlrpc_sec *sec = ctx->cc_sec;
- struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
- CDEBUG(D_SEC, "destroying ctx %p\n", ctx);
+ CDEBUG(D_SEC, "destroying ctx %p\n", ctx);
/* at this time the association with key has been broken. */
LASSERT(sec);
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(gctx_kr->gck_key == NULL);
- ctx_clear_timer_kr(ctx);
- LASSERT(gctx_kr->gck_timer == NULL);
+ ctx_clear_timer_kr(ctx);
+ LASSERT(gctx_kr->gck_timer == NULL);
- if (gss_cli_ctx_fini_common(sec, ctx))
- return;
+ if (gss_cli_ctx_fini_common(sec, ctx))
+ return;
- OBD_FREE_PTR(gctx_kr);
+ OBD_FREE_PTR(gctx_kr);
- cfs_atomic_dec(&sec->ps_nctx);
- sptlrpc_sec_put(sec);
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
}
static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
- if (sync) {
- ctx_destroy_kr(ctx);
- } else {
- cfs_atomic_inc(&ctx->cc_refcount);
- sptlrpc_gc_add_ctx(ctx);
- }
+ if (sync) {
+ ctx_destroy_kr(ctx);
+ } else {
+ atomic_inc(&ctx->cc_refcount);
+ sptlrpc_gc_add_ctx(ctx);
+ }
}
static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
- ctx_release_kr(ctx, sync);
+ if (atomic_dec_and_test(&ctx->cc_refcount))
+ ctx_release_kr(ctx, sync);
}
/*
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
- struct ptlrpc_sec *sec = ctx->cc_sec;
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
spin_lock_if(&sec->ps_lock, !locked);
- cfs_atomic_inc(&ctx->cc_refcount);
+ atomic_inc(&ctx->cc_refcount);
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
if (is_root)
*/
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
- struct ptlrpc_sec *sec = ctx->cc_sec;
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- /* if hashed bit has gone, leave the job to somebody who is doing it */
+ /* if hashed bit has gone, leave the job to somebody who is doing it */
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
- return 0;
+ return 0;
- /* drop ref inside spin lock to prevent race with other operations */
- spin_lock_if(&sec->ps_lock, !locked);
+ /* drop ref inside spin lock to prevent race with other operations */
+ spin_lock_if(&sec->ps_lock, !locked);
- if (gsec_kr->gsk_root_ctx == ctx)
- gsec_kr->gsk_root_ctx = NULL;
- cfs_hlist_del_init(&ctx->cc_cache);
- cfs_atomic_dec(&ctx->cc_refcount);
+ if (gsec_kr->gsk_root_ctx == ctx)
+ gsec_kr->gsk_root_ctx = NULL;
+ cfs_hlist_del_init(&ctx->cc_cache);
+ atomic_dec(&ctx->cc_refcount);
- spin_unlock_if(&sec->ps_lock, !locked);
+ spin_unlock_if(&sec->ps_lock, !locked);
- return 1;
+ return 1;
}
/*
*/
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(atomic_read(&key->usage) > 0);
- LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
- LASSERT(key->payload.data == NULL);
-
- /* at this time context may or may not in list. */
- key_get(key);
- cfs_atomic_inc(&ctx->cc_refcount);
- ctx2gctx_keyring(ctx)->gck_key = key;
- key->payload.data = ctx;
+ LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
+ LASSERT(key->payload.data == NULL);
+
+	/* at this time the context may or may not be in the list. */
+ key_get(key);
+ atomic_inc(&ctx->cc_refcount);
+ ctx2gctx_keyring(ctx)->gck_key = key;
+ key->payload.data = ctx;
}
/*
cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
cfs_hlist_del_init(&ctx->cc_cache);
- /* reverse ctx: update current seq to buddy svcctx if exist.
- * ideally this should be done at gss_cli_ctx_finalize(), but
- * the ctx destroy could be delayed by:
- * 1) ctx still has reference;
- * 2) ctx destroy is asynchronous;
- * and reverse import call inval_all_ctx() require this be done
- *_immediately_ otherwise newly created reverse ctx might copy
- * the very old sequence number from svcctx. */
- gctx = ctx2gctx(ctx);
- if (!rawobj_empty(&gctx->gc_svc_handle) &&
- sec_is_reverse(gctx->gc_base.cc_sec)) {
- gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
- (__u32) cfs_atomic_read(&gctx->gc_seq));
- }
-
- /* we need to wakeup waiting reqs here. the context might
- * be forced released before upcall finished, then the
- * late-arrived downcall can't find the ctx even. */
- sptlrpc_cli_ctx_wakeup(ctx);
-
- unbind_ctx_kr(ctx);
- ctx_put_kr(ctx, 0);
- }
+		/* reverse ctx: update current seq to buddy svcctx if it exists.
+		 * ideally this should be done at gss_cli_ctx_finalize(), but
+		 * the ctx destroy could be delayed by:
+		 * 1) ctx still has reference;
+		 * 2) ctx destroy is asynchronous;
+		 * and the reverse import call inval_all_ctx() requires this be
+		 * done _immediately_, otherwise a newly created reverse ctx
+		 * might copy the very old sequence number from svcctx. */
+ gctx = ctx2gctx(ctx);
+ if (!rawobj_empty(&gctx->gc_svc_handle) &&
+ sec_is_reverse(gctx->gc_base.cc_sec)) {
+ gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
+ (__u32) atomic_read(&gctx->gc_seq));
+ }
+
+		/* we need to wake up waiting reqs here. the context might be
+		 * forcibly released before the upcall finished; then the
+		 * late-arrived downcall can't even find the ctx. */
+ sptlrpc_cli_ctx_wakeup(ctx);
+
+ unbind_ctx_kr(ctx);
+ ctx_put_kr(ctx, 0);
+ }
}
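The sequence hand-off in the loop above, isolated for clarity (a sketch; identifiers as in the hunk): before a reverse ctx dies, it publishes its current sequence number to the buddy svcctx, so a re-created reverse ctx resumes from it instead of restarting at 0 and having its callbacks dropped as replays.

if (!rawobj_empty(&gctx->gc_svc_handle) &&
    sec_is_reverse(gctx->gc_base.cc_sec))
	gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
				       (__u32)atomic_read(&gctx->gc_seq));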
/*
}
}
- if (ctx) {
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
- cfs_atomic_inc(&ctx->cc_refcount);
- }
+ if (ctx) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+ atomic_inc(&ctx->cc_refcount);
+ }
spin_unlock(&sec->ps_lock);
	 * need the write lock of key->sem to serialize them. */
down_write(&key->sem);
- if (likely(key->payload.data != NULL)) {
- ctx = key->payload.data;
-
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1);
- LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
- LASSERT(atomic_read(&key->usage) >= 2);
-
- /* simply take a ref and return. it's upper layer's
- * responsibility to detect & replace dead ctx. */
- cfs_atomic_inc(&ctx->cc_refcount);
- } else {
- /* pre initialization with a cli_ctx. this can't be done in
- * key_instantiate() because we'v no enough information
- * there. */
- ctx = ctx_create_kr(sec, vcred);
- if (ctx != NULL) {
- ctx_enlist_kr(ctx, is_root, 0);
- bind_key_ctx(key, ctx);
-
- ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);
-
- CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
- key, ctx, sec);
- } else {
- /* we'd prefer to call key_revoke(), but we more like
- * to revoke it within this key->sem locked period. */
- key_revoke_locked(key);
- }
-
- create_new = 1;
- }
+ if (likely(key->payload.data != NULL)) {
+ ctx = key->payload.data;
+
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
+ LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
+ LASSERT(atomic_read(&key->usage) >= 2);
+
+ /* simply take a ref and return. it's upper layer's
+ * responsibility to detect & replace dead ctx. */
+ atomic_inc(&ctx->cc_refcount);
+ } else {
+		/* pre initialization with a cli_ctx. this can't be done in
+		 * key_instantiate() because we don't have enough information
+		 * there. */
+ ctx = ctx_create_kr(sec, vcred);
+ if (ctx != NULL) {
+ ctx_enlist_kr(ctx, is_root, 0);
+ bind_key_ctx(key, ctx);
+
+ ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);
+
+ CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
+ key, ctx, sec);
+ } else {
+			/* we'd prefer to call key_revoke(), but we want to
+			 * revoke it within this key->sem locked period. */
+ key_revoke_locked(key);
+ }
+
+ create_new = 1;
+ }
up_write(&key->sem);
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
ctx_release_kr(ctx, sync);
}
gsec_kr = sec2gsec_keyring(sec);
spin_lock(&sec->ps_lock);
- cfs_hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-
- if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
- continue;
-
- /* at this moment there's at least 2 base reference:
- * key association and in-list. */
- if (cfs_atomic_read(&ctx->cc_refcount) > 2) {
- if (!force)
- continue;
- CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
- ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec),
- cfs_atomic_read(&ctx->cc_refcount) - 2);
- }
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
+ continue;
+
+		/* at this moment there are at least 2 base references:
+		 * key association and in-list. */
+ if (atomic_read(&ctx->cc_refcount) > 2) {
+ if (!force)
+ continue;
+ CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
+ ctx, ctx->cc_vcred.vc_uid,
+ sec2target_str(ctx->cc_sec),
+ atomic_read(&ctx->cc_refcount) - 2);
+ }
set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
if (!grace)
clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- cfs_atomic_inc(&ctx->cc_refcount);
+ atomic_inc(&ctx->cc_refcount);
- if (ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
- } else {
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
- cfs_atomic_dec(&ctx->cc_refcount);
- }
- }
+ if (ctx_unlist_kr(ctx, 1)) {
+ cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ } else {
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+ atomic_dec(&ctx->cc_refcount);
+ }
+ }
spin_unlock(&sec->ps_lock);
dispose_ctx_list_kr(&freelist);
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
uid_t uid, int grace, int force)
{
- ENTRY;
+ ENTRY;
- CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
- sec, cfs_atomic_read(&sec->ps_refcount),
- cfs_atomic_read(&sec->ps_nctx),
- uid, grace, force);
+ CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
+ sec, atomic_read(&sec->ps_refcount),
+ atomic_read(&sec->ps_nctx),
+ uid, grace, force);
- if (uid != -1 && uid != 0)
- flush_user_ctx_cache_kr(sec, uid, grace, force);
- else
- flush_spec_ctx_cache_kr(sec, uid, grace, force);
+ if (uid != -1 && uid != 0)
+ flush_user_ctx_cache_kr(sec, uid, grace, force);
+ else
+ flush_spec_ctx_cache_kr(sec, uid, grace, force);
- RETURN(0);
+ RETURN(0);
}
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
- cfs_hlist_node_t *pos, *next;
- struct ptlrpc_cli_ctx *ctx;
- ENTRY;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
+ cfs_hlist_node_t *pos, *next;
+ struct ptlrpc_cli_ctx *ctx;
+ ENTRY;
- CWARN("running gc\n");
+ CWARN("running gc\n");
spin_lock(&sec->ps_lock);
- cfs_hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-
- cfs_atomic_inc(&ctx->cc_refcount);
-
- if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
- CWARN("unhashed ctx %p\n", ctx);
- } else {
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
- cfs_atomic_dec(&ctx->cc_refcount);
- }
- }
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ atomic_inc(&ctx->cc_refcount);
+
+ if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
+ cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ CWARN("unhashed ctx %p\n", ctx);
+ } else {
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+ atomic_dec(&ctx->cc_refcount);
+ }
+ }
spin_unlock(&sec->ps_lock);
dispose_ctx_list_kr(&freelist);
snprintf(mech, sizeof(mech), "N/A");
mech[sizeof(mech) - 1] = '\0';
- seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
- "seq %d, win %u, key %08x(ref %d), "
- "hdl "LPX64":"LPX64", mech: %s\n",
- ctx, ctx->cc_vcred.vc_uid,
- cfs_atomic_read(&ctx->cc_refcount),
- ctx->cc_expire,
- ctx->cc_expire ? ctx->cc_expire - now : 0,
- flags_str,
- cfs_atomic_read(&gctx->gc_seq),
- gctx->gc_win,
- key ? key->serial : 0,
- key ? atomic_read(&key->usage) : 0,
- gss_handle_to_u64(&gctx->gc_handle),
- gss_handle_to_u64(&gctx->gc_svc_handle),
- mech);
- }
+ seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
+ "seq %d, win %u, key %08x(ref %d), "
+ "hdl "LPX64":"LPX64", mech: %s\n",
+ ctx, ctx->cc_vcred.vc_uid,
+ atomic_read(&ctx->cc_refcount),
+ ctx->cc_expire,
+ ctx->cc_expire ? ctx->cc_expire - now : 0,
+ flags_str,
+ atomic_read(&gctx->gc_seq),
+ gctx->gc_win,
+ key ? key->serial : 0,
+ key ? atomic_read(&key->usage) : 0,
+ gss_handle_to_u64(&gctx->gc_handle),
+ gss_handle_to_u64(&gctx->gc_svc_handle),
+ mech);
+ }
spin_unlock(&sec->ps_lock);
RETURN(0);
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
- if (cli_ctx_check_death(ctx)) {
- kill_ctx_kr(ctx);
- return 1;
- }
+ if (cli_ctx_check_death(ctx)) {
+ kill_ctx_kr(ctx);
+ return 1;
+ }
- if (cli_ctx_is_ready(ctx))
- return 0;
- return 1;
+ if (cli_ctx_is_ready(ctx))
+ return 0;
+ return 1;
}
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
- cli_ctx_expire(ctx);
- kill_ctx_kr(ctx);
+ cli_ctx_expire(ctx);
+ kill_ctx_kr(ctx);
}
/****************************************
RETURN(rc);
}
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
- ctx_clear_timer_kr(ctx);
+ ctx_clear_timer_kr(ctx);
/* don't proceed if already refreshed */
if (cli_ctx_is_refreshed(ctx)) {
static
void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- if (gss_cli_ctx_fini_common(sec, ctx))
- return;
+ if (gss_cli_ctx_fini_common(sec, ctx))
+ return;
- OBD_FREE_PTR(gctx);
+ OBD_FREE_PTR(gctx);
- cfs_atomic_dec(&sec->ps_nctx);
- sptlrpc_sec_put(sec);
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
}
static
void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
{
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- cfs_atomic_inc(&ctx->cc_refcount);
- cfs_hlist_add_head(&ctx->cc_cache, hash);
+ atomic_inc(&ctx->cc_refcount);
+ cfs_hlist_add_head(&ctx->cc_cache, hash);
}
/*
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
{
LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+ if (atomic_dec_and_test(&ctx->cc_refcount)) {
__cfs_hlist_del(&ctx->cc_cache);
cfs_hlist_add_head(&ctx->cc_cache, freelist);
} else {
int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
cfs_hlist_head_t *freelist)
{
- LASSERT(ctx->cc_sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- return ctx_check_death_pf(ctx, freelist);
+ return ctx_check_death_pf(ctx, freelist);
}
static inline
static
void ctx_list_destroy_pf(cfs_hlist_head_t *head)
{
- struct ptlrpc_cli_ctx *ctx;
+ struct ptlrpc_cli_ctx *ctx;
- while (!cfs_hlist_empty(head)) {
- ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
- cc_cache);
+ while (!cfs_hlist_empty(head)) {
+ ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
+ cc_cache);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
- &ctx->cc_flags) == 0);
+ &ctx->cc_flags) == 0);
- cfs_hlist_del_init(&ctx->cc_cache);
- ctx_destroy_pf(ctx->cc_sec, ctx);
- }
+ cfs_hlist_del_init(&ctx->cc_cache);
+ ctx_destroy_pf(ctx->cc_sec, ctx);
+ }
}
/****************************************
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
LASSERT(ctx->cc_sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
cli_ctx_expire(ctx);
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 1);
cfs_hlist_del_init(&ctx->cc_cache);
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
+ if (atomic_dec_and_test(&ctx->cc_refcount))
LBUG();
}
/* hold a ref */
if (ctx)
- cfs_atomic_inc(&ctx->cc_refcount);
+ atomic_inc(&ctx->cc_refcount);
spin_unlock(&sec->ps_lock);
- /* the allocator of the context must give the first push to refresh */
- if (new) {
- LASSERT(new == ctx);
- gss_cli_ctx_refresh_pf(new);
- }
+ /* the allocator of the context must give the first push to refresh */
+ if (new) {
+ LASSERT(new == ctx);
+ gss_cli_ctx_refresh_pf(new);
+ }
- ctx_list_destroy_pf(&freelist);
- RETURN(ctx);
+ ctx_list_destroy_pf(&freelist);
+ RETURN(ctx);
}
static
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
spin_lock(&sec->ps_lock);
- for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
- cfs_hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i],
- cc_cache) {
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-
- if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
- continue;
-
- if (cfs_atomic_read(&ctx->cc_refcount) > 1) {
- busy++;
- if (!force)
- continue;
-
- CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
- "grace %d\n",
- cfs_atomic_read(&ctx->cc_refcount),
- ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec), grace);
- }
- ctx_unhash_pf(ctx, &freelist);
+ for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i],
+ cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
+ continue;
+
+ if (atomic_read(&ctx->cc_refcount) > 1) {
+ busy++;
+ if (!force)
+ continue;
+
+ CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
+ "grace %d\n",
+ atomic_read(&ctx->cc_refcount),
+ ctx, ctx->cc_vcred.vc_uid,
+ sec2target_str(ctx->cc_sec), grace);
+ }
+ ctx_unhash_pf(ctx, &freelist);
set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
if (!grace)
};
struct gss_upcall_msg {
- struct rpc_pipe_msg gum_base;
- cfs_atomic_t gum_refcount;
- cfs_list_t gum_list;
- __u32 gum_mechidx;
- struct gss_sec *gum_gsec;
- struct gss_cli_ctx *gum_gctx;
- struct gss_upcall_msg_data gum_data;
+ struct rpc_pipe_msg gum_base;
+ atomic_t gum_refcount;
+ cfs_list_t gum_list;
+ __u32 gum_mechidx;
+ struct gss_sec *gum_gsec;
+ struct gss_cli_ctx *gum_gctx;
+ struct gss_upcall_msg_data gum_data;
};
-static cfs_atomic_t upcall_seq = CFS_ATOMIC_INIT(0);
+static atomic_t upcall_seq = ATOMIC_INIT(0);
static inline
__u32 upcall_get_sequence(void)
{
- return (__u32) cfs_atomic_inc_return(&upcall_seq);
+ return (__u32) atomic_inc_return(&upcall_seq);
}
enum mech_idx_t {
static
void gss_release_msg(struct gss_upcall_msg *gmsg)
{
- ENTRY;
- LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
+ ENTRY;
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
- if (!cfs_atomic_dec_and_test(&gmsg->gum_refcount)) {
- EXIT;
- return;
- }
+ if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
+ EXIT;
+ return;
+ }
if (gmsg->gum_gctx) {
sptlrpc_cli_ctx_wakeup(&gmsg->gum_gctx->gc_base);
return;
cfs_list_del_init(&gmsg->gum_list);
- LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
- cfs_atomic_dec(&gmsg->gum_refcount);
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
+ atomic_dec(&gmsg->gum_refcount);
}
static
static
void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
{
- if (gmsg->gum_gctx) {
- struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
+ if (gmsg->gum_gctx) {
+ struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- sptlrpc_cli_ctx_expire(ctx);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ sptlrpc_cli_ctx_expire(ctx);
set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
- }
+ }
}
static
struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
{
- struct gss_upcall_msg *gmsg;
+ struct gss_upcall_msg *gmsg;
- upcall_list_lock(mechidx);
- cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
- if (gmsg->gum_data.gum_seq != seq)
- continue;
+ upcall_list_lock(mechidx);
+ cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+ if (gmsg->gum_data.gum_seq != seq)
+ continue;
- LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
- LASSERT(gmsg->gum_mechidx == mechidx);
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ LASSERT(gmsg->gum_mechidx == mechidx);
- cfs_atomic_inc(&gmsg->gum_refcount);
- upcall_list_unlock(mechidx);
- return gmsg;
- }
- upcall_list_unlock(mechidx);
- return NULL;
+ atomic_inc(&gmsg->gum_refcount);
+ upcall_list_unlock(mechidx);
+ return gmsg;
+ }
+ upcall_list_unlock(mechidx);
+ return NULL;
}
static
GOTO(out_free, rc = -EINVAL);
}
- gss_unhash_msg(gss_msg);
- gctx = gss_msg->gum_gctx;
- LASSERT(gctx);
- LASSERT(cfs_atomic_read(&gctx->gc_base.cc_refcount) > 0);
+ gss_unhash_msg(gss_msg);
+ gctx = gss_msg->gum_gctx;
+ LASSERT(gctx);
+ LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
/* timeout is not in use for now */
if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
return;
}
- gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
- gumd = &gmsg->gum_data;
- LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
+ gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
+ gumd = &gmsg->gum_data;
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
- CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
- "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
- gumd->gum_nid, (int) sizeof(gumd->gum_obd),
- gumd->gum_obd, msg->errno);
+ CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
+ "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
+ gumd->gum_nid, (int) sizeof(gumd->gum_obd),
+ gumd->gum_obd, msg->errno);
- cfs_atomic_inc(&gmsg->gum_refcount);
- gss_unhash_msg(gmsg);
- if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
- cfs_time_t now = cfs_time_current_sec();
+ atomic_inc(&gmsg->gum_refcount);
+ gss_unhash_msg(gmsg);
+ if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
+ cfs_time_t now = cfs_time_current_sec();
- if (cfs_time_after(now, ratelimit)) {
- CWARN("upcall timed out, is lgssd running?\n");
- ratelimit = now + 15;
- }
- }
- gss_msg_fail_ctx(gmsg);
- gss_release_msg(gmsg);
- EXIT;
+ if (cfs_time_after(now, ratelimit)) {
+ CWARN("upcall timed out, is lgssd running?\n");
+ ratelimit = now + 15;
+ }
+ }
+ gss_msg_fail_ctx(gmsg);
+ gss_release_msg(gmsg);
+ EXIT;
}
static
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd);
- gmsg->gum_base.errno = -EPIPE;
- cfs_atomic_inc(&gmsg->gum_refcount);
- gss_unhash_msg_nolock(gmsg);
+ gmsg->gum_base.errno = -EPIPE;
+ atomic_inc(&gmsg->gum_refcount);
+ gss_unhash_msg_nolock(gmsg);
- gss_msg_fail_ctx(gmsg);
+ gss_msg_fail_ctx(gmsg);
- upcall_list_unlock(idx);
- gss_release_msg(gmsg);
- upcall_list_lock(idx);
- }
- upcall_list_unlock(idx);
- EXIT;
+ upcall_list_unlock(idx);
+ gss_release_msg(gmsg);
+ upcall_list_lock(idx);
+ }
+ upcall_list_unlock(idx);
+ EXIT;
}
static struct rpc_pipe_ops gss_upcall_ops = {
gmsg->gum_base.copied = 0;
gmsg->gum_base.errno = 0;
- /* init upcall msg */
- cfs_atomic_set(&gmsg->gum_refcount, 1);
- gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
- gmsg->gum_gsec = gsec;
- gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
- struct gss_cli_ctx, gc_base);
- gmsg->gum_data.gum_seq = upcall_get_sequence();
- gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
- gmsg->gum_data.gum_gid = 0; /* not used for now */
- gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
- gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
- strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
- sizeof(gmsg->gum_data.gum_obd));
+ /* init upcall msg */
+ atomic_set(&gmsg->gum_refcount, 1);
+ gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
+ gmsg->gum_gsec = gsec;
+ gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
+ struct gss_cli_ctx, gc_base);
+ gmsg->gum_data.gum_seq = upcall_get_sequence();
+ gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
+ gmsg->gum_data.gum_gid = 0; /* not used for now */
+ gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
+ gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
+ strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
+ sizeof(gmsg->gum_data.gum_obd));
	/* This could only happen when the sysadmin has set it dead/expired
	 * by force using lctl. */
* statistic of "out-of-sequence-window"
*/
static struct {
- spinlock_t oos_lock;
- cfs_atomic_t oos_cli_count; /* client occurrence */
- int oos_cli_behind; /* client max seqs behind */
- cfs_atomic_t oos_svc_replay[3]; /* server replay detected */
- cfs_atomic_t oos_svc_pass[3]; /* server verified ok */
+ spinlock_t oos_lock;
+ atomic_t oos_cli_count; /* client occurrence */
+ int oos_cli_behind; /* client max seqs behind */
+ atomic_t oos_svc_replay[3]; /* server replay detected */
+ atomic_t oos_svc_pass[3]; /* server verified ok */
} gss_stat_oos = {
- .oos_cli_count = CFS_ATOMIC_INIT(0),
- .oos_cli_behind = 0,
- .oos_svc_replay = { CFS_ATOMIC_INIT(0), },
- .oos_svc_pass = { CFS_ATOMIC_INIT(0), },
+ .oos_cli_count = ATOMIC_INIT(0),
+ .oos_cli_behind = 0,
+ .oos_svc_replay = { ATOMIC_INIT(0), },
+ .oos_svc_pass = { ATOMIC_INIT(0), },
};
void gss_stat_oos_record_cli(int behind)
{
- cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
+ atomic_inc(&gss_stat_oos.oos_cli_count);
spin_lock(&gss_stat_oos.oos_lock);
if (behind > gss_stat_oos.oos_cli_behind)
void gss_stat_oos_record_svc(int phase, int replay)
{
- LASSERT(phase >= 0 && phase <= 2);
+ LASSERT(phase >= 0 && phase <= 2);
- if (replay)
- cfs_atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
- else
- cfs_atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
+ if (replay)
+ atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
+ else
+ atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
}
static int gss_proc_oos_seq_show(struct seq_file *m, void *v)
{
- return seq_printf(m,
- "seqwin: %u\n"
- "backwin: %u\n"
- "client fall behind seqwin\n"
- " occurrence: %d\n"
- " max seq behind: %d\n"
- "server replay detected:\n"
- " phase 0: %d\n"
- " phase 1: %d\n"
- " phase 2: %d\n"
- "server verify ok:\n"
- " phase 2: %d\n",
- GSS_SEQ_WIN_MAIN,
- GSS_SEQ_WIN_BACK,
- cfs_atomic_read(&gss_stat_oos.oos_cli_count),
- gss_stat_oos.oos_cli_behind,
- cfs_atomic_read(&gss_stat_oos.oos_svc_replay[0]),
- cfs_atomic_read(&gss_stat_oos.oos_svc_replay[1]),
- cfs_atomic_read(&gss_stat_oos.oos_svc_replay[2]),
- cfs_atomic_read(&gss_stat_oos.oos_svc_pass[2]));
+ return seq_printf(m, "seqwin: %u\n"
+ "backwin: %u\n"
+ "client fall behind seqwin\n"
+ " occurrence: %d\n"
+ " max seq behind: %d\n"
+ "server replay detected:\n"
+ " phase 0: %d\n"
+ " phase 1: %d\n"
+ " phase 2: %d\n"
+ "server verify ok:\n"
+ " phase 2: %d\n",
+ GSS_SEQ_WIN_MAIN,
+ GSS_SEQ_WIN_BACK,
+ atomic_read(&gss_stat_oos.oos_cli_count),
+ gss_stat_oos.oos_cli_behind,
+ atomic_read(&gss_stat_oos.oos_svc_replay[0]),
+ atomic_read(&gss_stat_oos.oos_svc_replay[1]),
+ atomic_read(&gss_stat_oos.oos_svc_replay[2]),
+ atomic_read(&gss_stat_oos.oos_svc_pass[2]));
}
LPROC_SEQ_FOPS_RO(gss_proc_oos);
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
+ LASSERT(atomic_read(&ctx->cc_refcount));
if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
- if (!ctx->cc_early_expire)
+ if (!ctx->cc_early_expire)
clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- ctx->cc_expire,
- ctx->cc_expire == 0 ? 0 :
- cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire,
+ ctx->cc_expire == 0 ? 0 :
+ cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
- sptlrpc_cli_ctx_wakeup(ctx);
- return 1;
- }
+ sptlrpc_cli_ctx_wakeup(ctx);
+ return 1;
+ }
- return 0;
+ return 0;
}
/*
flags |= LUSTRE_GSS_PACK_USER;
redo:
- seq = cfs_atomic_inc_return(&gctx->gc_seq);
-
- rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
- ctx->cc_sec->ps_part,
- flags, gctx->gc_proc, seq, svc,
- &gctx->gc_handle);
- if (rc < 0)
- RETURN(rc);
-
- /* gss_sign_msg() msg might take long time to finish, in which period
- * more rpcs could be wrapped up and sent out. if we found too many
- * of them we should repack this rpc, because sent it too late might
- * lead to the sequence number fall behind the window on server and
- * be dropped. also applies to gss_cli_ctx_seal().
- *
- * Note: null mode dosen't check sequence number. */
- if (svc != SPTLRPC_SVC_NULL &&
- cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
-
- gss_stat_oos_record_cli(behind);
- CWARN("req %p: %u behind, retry signing\n", req, behind);
- goto redo;
- }
-
- req->rq_reqdata_len = rc;
- RETURN(0);
+ seq = atomic_inc_return(&gctx->gc_seq);
+
+ rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
+ ctx->cc_sec->ps_part,
+ flags, gctx->gc_proc, seq, svc,
+ &gctx->gc_handle);
+ if (rc < 0)
+ RETURN(rc);
+
+	/* gss_sign_msg() might take a long time to finish, during which
+	 * more rpcs could be wrapped up and sent out. if we find too many
+	 * of them we should repack this rpc, because sending it too late
+	 * might let the sequence number fall behind the window on the
+	 * server and get the rpc dropped. this also applies to
+	 * gss_cli_ctx_seal().
+	 *
+	 * Note: null mode doesn't check the sequence number. */
+ if (svc != SPTLRPC_SVC_NULL &&
+ atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = atomic_read(&gctx->gc_seq) - seq;
+
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry signing\n", req, behind);
+ goto redo;
+ }
+
+ req->rq_reqdata_len = rc;
+ RETURN(0);
}
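The repack logic in gss_cli_ctx_sign() and gss_cli_ctx_seal() reduces to the loop below; a minimal sketch (sign_once() is a hypothetical stand-in for the real signing call):

/* Take a sequence number, do the possibly slow signing, then check how
 * far the shared counter advanced meanwhile. If we fell more than
 * GSS_SEQ_REPACK_THRESHOLD behind, our number may already be outside
 * the server's window, so take a fresh one and sign again. */
do {
	seq = atomic_inc_return(&gctx->gc_seq);
	rc = sign_once(req, seq);	/* hypothetical helper */
	if (rc < 0)
		RETURN(rc);
} while (svc != SPTLRPC_SVC_NULL &&
	 atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD);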
static
ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
/* buffer objects */
hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
}
LASSERT(token.len <= buflens[1]);
- /* see explain in gss_cli_ctx_sign() */
- if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
- GSS_SEQ_REPACK_THRESHOLD)) {
- int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+	/* see the explanation in gss_cli_ctx_sign() */
+ if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ GSS_SEQ_REPACK_THRESHOLD)) {
+ int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
- gss_stat_oos_record_cli(behind);
- CWARN("req %p: %u behind, retry sealing\n", req, behind);
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry sealing\n", req, behind);
- ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
- goto redo;
- }
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ goto redo;
+ }
- /* now set the final wire data length */
- req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
- RETURN(0);
+ /* now set the final wire data length */
+	req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len, 0);
+ RETURN(0);
err_free:
- if (!req->rq_pool) {
- OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
- RETURN(rc);
+ if (!req->rq_pool) {
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+ RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
- /* initialize upper ptlrpc_sec */
- sec = &gsec->gs_base;
- sec->ps_policy = policy;
- cfs_atomic_set(&sec->ps_refcount, 0);
- cfs_atomic_set(&sec->ps_nctx, 0);
- sec->ps_id = sptlrpc_get_next_secid();
- sec->ps_flvr = *sf;
- sec->ps_import = class_import_get(imp);
+ /* initialize upper ptlrpc_sec */
+ sec = &gsec->gs_base;
+ sec->ps_policy = policy;
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_flvr = *sf;
+ sec->ps_import = class_import_get(imp);
spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
sec->ps_gc_interval = GSS_GC_INTERVAL;
void gss_sec_destroy_common(struct gss_sec *gsec)
{
- struct ptlrpc_sec *sec = &gsec->gs_base;
- ENTRY;
+ struct ptlrpc_sec *sec = &gsec->gs_base;
+ ENTRY;
- LASSERT(sec->ps_import);
- LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
- if (gsec->gs_mech) {
- lgss_mech_put(gsec->gs_mech);
- gsec->gs_mech = NULL;
- }
+ if (gsec->gs_mech) {
+ lgss_mech_put(gsec->gs_mech);
+ gsec->gs_mech = NULL;
+ }
- class_import_put(sec->ps_import);
+ class_import_put(sec->ps_import);
- if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
- sptlrpc_enc_pool_del_user();
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+ sptlrpc_enc_pool_del_user();
- EXIT;
+ EXIT;
}
void gss_sec_kill(struct ptlrpc_sec *sec)
{
- sec->ps_dying = 1;
+ sec->ps_dying = 1;
}
int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
struct ptlrpc_ctx_ops *ctxops,
struct vfs_cred *vcred)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- gctx->gc_win = 0;
- cfs_atomic_set(&gctx->gc_seq, 0);
+ gctx->gc_win = 0;
+ atomic_set(&gctx->gc_seq, 0);
- CFS_INIT_HLIST_NODE(&ctx->cc_cache);
- cfs_atomic_set(&ctx->cc_refcount, 0);
- ctx->cc_sec = sec;
- ctx->cc_ops = ctxops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_NEW;
- ctx->cc_vcred = *vcred;
+ CFS_INIT_HLIST_NODE(&ctx->cc_cache);
+ atomic_set(&ctx->cc_refcount, 0);
+ ctx->cc_sec = sec;
+ ctx->cc_ops = ctxops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_NEW;
+ ctx->cc_vcred = *vcred;
spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
+ CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
- /* take a ref on belonging sec, balanced in ctx destroying */
- cfs_atomic_inc(&sec->ps_refcount);
- /* statistic only */
- cfs_atomic_inc(&sec->ps_nctx);
+	/* take a ref on the owning sec, balanced when the ctx is destroyed */
+	atomic_inc(&sec->ps_refcount);
+	/* statistics only */
+	atomic_inc(&sec->ps_nctx);
- CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- return 0;
+ CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ return 0;
}
/*
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
-
- LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(ctx->cc_sec == sec);
-
- /*
- * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
- * this is to avoid potential problems of client side reverse svc ctx
- * be mis-destroyed in various recovery senarios. anyway client can
- * manage its reverse ctx well by associating it with its buddy ctx.
- */
- if (sec_is_reverse(sec))
- ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
-
- if (gctx->gc_mechctx) {
- /* the final context fini rpc will use this ctx too, and it's
- * asynchronous which finished by request_out_callback(). so
- * we add refcount, whoever drop finally drop the refcount to
- * 0 should responsible for the rest of destroy. */
- cfs_atomic_inc(&ctx->cc_refcount);
-
- gss_do_ctx_fini_rpc(gctx);
- gss_cli_ctx_finalize(gctx);
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
+
+ /*
+	 * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
+	 * this is to avoid potential problems of the client side reverse
+	 * svc ctx being mis-destroyed in various recovery scenarios. anyway
+	 * the client can manage its reverse ctx well by associating it with
+	 * its buddy ctx.
+ */
+ if (sec_is_reverse(sec))
+ ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+
+ if (gctx->gc_mechctx) {
+		/* the final context fini rpc will use this ctx too, and it's
+		 * asynchronous, finished by request_out_callback(). so we
+		 * add a refcount; whoever finally drops the refcount to 0 is
+		 * responsible for the rest of the destroy. */
+ atomic_inc(&ctx->cc_refcount);
+
+ gss_do_ctx_fini_rpc(gctx);
+ gss_cli_ctx_finalize(gctx);
+
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return 1;
+ }
+
+ if (sec_is_reverse(sec))
+ CWARN("reverse sec %p: destroy ctx %p\n",
+ ctx->cc_sec, ctx);
+ else
+ CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
- return 1;
- }
-
- if (sec_is_reverse(sec))
- CWARN("reverse sec %p: destroy ctx %p\n",
- ctx->cc_sec, ctx);
- else
- CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-
- return 0;
+ return 0;
}
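A hedged aside for reviewers, illustrating the idiom the hunk above relies on:
take an extra reference before an asynchronous fini, then let whichever side
drops the count to zero complete the destroy. Names here (struct obj,
obj_teardown) are hypothetical; kernel-style atomic_t is assumed.

#include <linux/atomic.h>

struct obj {
	atomic_t	o_ref;
};

/* returns 1 if the async path now owns the final teardown */
static int obj_teardown(struct obj *o)
{
	atomic_inc(&o->o_ref);		/* pin across the async fini */

	/* ... start async fini; its completion drops the matching ref ... */

	if (!atomic_dec_and_test(&o->o_ref))
		return 1;		/* async side drops the last ref */

	/* we were last: finish the destroy synchronously */
	return 0;
}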
static
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
- cfs_atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
- gss_svc_reqctx_free(grctx);
+ if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ gss_svc_reqctx_free(grctx);
}
static
if (!grctx)
RETURN(SECSVC_DROP);
- grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
- req->rq_svc_ctx = &grctx->src_base;
- gw = &grctx->src_wirectx;
+ grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
+ atomic_set(&grctx->src_base.sc_refcount, 1);
+ req->rq_svc_ctx = &grctx->src_base;
+ gw = &grctx->src_wirectx;
/* save wire context */
gw->gw_flags = ghdr->gh_flags;
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
- gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
+ LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
cli_gctx->gc_win = GSS_SEQ_WIN;
- /* The problem is the reverse ctx might get lost in some recovery
- * situations, and the same svc_ctx will be used to re-create it.
- * if there's callback be sentout before that, new reverse ctx start
- * with sequence 0 will lead to future callback rpc be treated as
- * replay.
- *
- * each reverse root ctx will record its latest sequence number on its
- * buddy svcctx before be destroied, so here we continue use it.
- */
- cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
-
- if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
- CERROR("failed to dup svc handle\n");
- goto err_out;
- }
+	/* The problem is that the reverse ctx might get lost in some recovery
+	 * situations, and the same svc_ctx will be used to re-create it.
+	 * if a callback was sent out before that, a new reverse ctx starting
+	 * with sequence 0 would cause future callback rpcs to be treated as
+	 * replays.
+	 *
+	 * each reverse root ctx records its latest sequence number on its
+	 * buddy svcctx before being destroyed, so here we continue to use it.
+	 */
+ atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+
+ if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
+ CERROR("failed to dup svc handle\n");
+ goto err_out;
+ }
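A hedged sketch of the sequence-continuation trick described in the comment
above (struct and function names are hypothetical): park the last sequence on
the longer-lived buddy svcctx at destroy time, and resume from it on
re-create, so callbacks are never mistaken for replays.

struct svc_ctx { __u32	  sc_rvs_seq; };
struct rvs_ctx { atomic_t cc_seq; };

static void rvs_ctx_park_seq(struct rvs_ctx *c, struct svc_ctx *buddy)
{
	/* record the latest sequence before the reverse ctx goes away */
	buddy->sc_rvs_seq = atomic_read(&c->cc_seq);
}

static void rvs_ctx_resume_seq(struct rvs_ctx *c, struct svc_ctx *buddy)
{
	/* continue the old numbering instead of restarting at 0 */
	atomic_set(&c->cc_seq, buddy->sc_rvs_seq);
}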
if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
GSS_S_COMPLETE) {
*/
void ptlrpc_invalidate_import(struct obd_import *imp)
{
- cfs_list_t *tmp, *n;
- struct ptlrpc_request *req;
- struct l_wait_info lwi;
- unsigned int timeout;
- int rc;
+ cfs_list_t *tmp, *n;
+ struct ptlrpc_request *req;
+ struct l_wait_info lwi;
+ unsigned int timeout;
+ int rc;
- cfs_atomic_inc(&imp->imp_inval_count);
+ atomic_inc(&imp->imp_inval_count);
- if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
- ptlrpc_deactivate_import(imp);
+ if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
+ ptlrpc_deactivate_import(imp);
- LASSERT(imp->imp_invalid);
+ LASSERT(imp->imp_invalid);
/* Wait forever until inflight == 0. We really can't do it another
* way because in some cases we need to wait for very long reply
CDEBUG(D_RPCTRACE,"Sleeping %d sec for inflight to error out\n",
timeout);
- /* Wait for all requests to error out and call completion
- * callbacks. Cap it at obd_timeout -- these should all
- * have been locally cancelled by ptlrpc_abort_inflight. */
- lwi = LWI_TIMEOUT_INTERVAL(
- cfs_timeout_cap(cfs_time_seconds(timeout)),
- (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
- NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- (cfs_atomic_read(&imp->imp_inflight) == 0),
- &lwi);
- if (rc) {
- const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
-
- CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
- cli_tgt, rc,
- cfs_atomic_read(&imp->imp_inflight));
+ /* Wait for all requests to error out and call completion
+ * callbacks. Cap it at obd_timeout -- these should all
+ * have been locally cancelled by ptlrpc_abort_inflight. */
+ lwi = LWI_TIMEOUT_INTERVAL(
+ cfs_timeout_cap(cfs_time_seconds(timeout)),
+ (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
+ NULL, NULL);
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ (atomic_read(&imp->imp_inflight) == 0),
+ &lwi);
+ if (rc) {
+ const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
+
+ CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
+ cli_tgt, rc, atomic_read(&imp->imp_inflight));
spin_lock(&imp->imp_lock);
- if (cfs_atomic_read(&imp->imp_inflight) == 0) {
- int count = cfs_atomic_read(&imp->imp_unregistering);
-
- /* We know that "unregistering" rpcs only can
- * survive in sending or delaying lists (they
- * maybe waiting for long reply unlink in
- * sluggish nets). Let's check this. If there
- * is no inflight and unregistering != 0, this
- * is bug. */
- LASSERTF(count == 0, "Some RPCs are still "
- "unregistering: %d\n", count);
-
- /* Let's save one loop as soon as inflight have
- * dropped to zero. No new inflights possible at
- * this point. */
- rc = 0;
+ if (atomic_read(&imp->imp_inflight) == 0) {
+ int count = atomic_read(&imp->imp_unregistering);
+
+			/* We know that "unregistering" rpcs can only
+			 * survive in the sending or delaying lists (they
+			 * may be waiting for a long reply unlink on
+			 * sluggish nets). Let's check this. If there
+			 * are no inflight rpcs and unregistering != 0,
+			 * this is a bug. */
+ LASSERTF(count == 0, "Some RPCs are still "
+ "unregistering: %d\n", count);
+
+			/* Let's save one loop as soon as inflight has
+			 * dropped to zero. No new inflights are possible
+			 * at this point. */
+ rc = 0;
} else {
cfs_list_for_each_safe(tmp, n,
&imp->imp_sending_list) {
"still on delayed list");
}
- CERROR("%s: RPCs in \"%s\" phase found (%d). "
- "Network is sluggish? Waiting them "
- "to error out.\n", cli_tgt,
- ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
- cfs_atomic_read(&imp->
- imp_unregistering));
- }
+ CERROR("%s: RPCs in \"%s\" phase found (%d). "
+ "Network is sluggish? Waiting them "
+ "to error out.\n", cli_tgt,
+ ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
+ atomic_read(&imp->imp_unregistering));
+ }
spin_unlock(&imp->imp_lock);
- }
- } while (rc != 0);
+ }
+ } while (rc != 0);
/*
* Let's additionally check that no new rpcs added to import in
* "invalidate" state.
*/
- LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
+ LASSERT(atomic_read(&imp->imp_inflight) == 0);
obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
sptlrpc_import_flush_all_ctx(imp);
- cfs_atomic_dec(&imp->imp_inval_count);
+ atomic_dec(&imp->imp_inval_count);
wake_up_all(&imp->imp_recovery_waitq);
}
EXPORT_SYMBOL(ptlrpc_invalidate_import);
int ptlrpc_reconnect_import(struct obd_import *imp)
{
- ptlrpc_set_import_discon(imp, 0);
- /* Force a new connect attempt */
- ptlrpc_invalidate_import(imp);
- /* Do a fresh connect next time by zeroing the handle */
- ptlrpc_disconnect_import(imp, 1);
- /* Wait for all invalidate calls to finish */
- if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
- int rc;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- (cfs_atomic_read(&imp->imp_inval_count) == 0),
- &lwi);
- if (rc)
- CERROR("Interrupted, inval=%d\n",
- cfs_atomic_read(&imp->imp_inval_count));
- }
+ ptlrpc_set_import_discon(imp, 0);
+ /* Force a new connect attempt */
+ ptlrpc_invalidate_import(imp);
+ /* Do a fresh connect next time by zeroing the handle */
+ ptlrpc_disconnect_import(imp, 1);
+ /* Wait for all invalidate calls to finish */
+ if (atomic_read(&imp->imp_inval_count) > 0) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ int rc;
+
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ (atomic_read(&imp->imp_inval_count) == 0),
+ &lwi);
+ if (rc)
+ CERROR("Interrupted, inval=%d\n",
+ atomic_read(&imp->imp_inval_count));
+ }
- /* Allow reconnect attempts */
- imp->imp_obd->obd_no_recov = 0;
- /* Remove 'invalid' flag */
- ptlrpc_activate_import(imp);
- /* Attempt a new connect */
- ptlrpc_recover_import(imp, NULL, 0);
- return 0;
+ /* Allow reconnect attempts */
+ imp->imp_obd->obd_no_recov = 0;
+ /* Remove 'invalid' flag */
+ ptlrpc_activate_import(imp);
+ /* Attempt a new connect */
+ ptlrpc_recover_import(imp, NULL, 0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_reconnect_import);
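For reference, a minimal sketch of the drain pattern used above, assuming the
same counter/waitqueue pairing: invalidation bumps imp_inval_count on entry,
drops it on exit, and wakes imp_recovery_waitq; late-comers simply sleep until
the counter reads zero.

static void wait_invalidate_done(struct obd_import *imp)
{
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

	/* interruptible: a signal just leaves the counter non-zero */
	l_wait_event(imp->imp_recovery_waitq,
		     atomic_read(&imp->imp_inval_count) == 0, &lwi);
}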
struct ptlrpc_request *req,
void * data, int rc)
{
- ENTRY;
- cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
- if (req->rq_status == 0 &&
- !req->rq_import->imp_vbr_failed) {
- ptlrpc_import_recovery_state_machine(req->rq_import);
- } else {
- if (req->rq_import->imp_vbr_failed) {
- CDEBUG(D_WARNING,
- "%s: version recovery fails, reconnecting\n",
- req->rq_import->imp_obd->obd_name);
- } else {
- CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
- "reconnecting\n",
- req->rq_import->imp_obd->obd_name,
- req->rq_status);
- }
- ptlrpc_connect_import(req->rq_import);
- }
+ ENTRY;
+ atomic_dec(&req->rq_import->imp_replay_inflight);
+ if (req->rq_status == 0 &&
+ !req->rq_import->imp_vbr_failed) {
+ ptlrpc_import_recovery_state_machine(req->rq_import);
+ } else {
+ if (req->rq_import->imp_vbr_failed) {
+ CDEBUG(D_WARNING,
+ "%s: version recovery fails, reconnecting\n",
+ req->rq_import->imp_obd->obd_name);
+ } else {
+ CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
+ "reconnecting\n",
+ req->rq_import->imp_obd->obd_name,
+ req->rq_status);
+ }
+ ptlrpc_connect_import(req->rq_import);
+ }
- RETURN(0);
+ RETURN(0);
}
/**
*/
static int signal_completed_replay(struct obd_import *imp)
{
- struct ptlrpc_request *req;
- ENTRY;
+ struct ptlrpc_request *req;
+ ENTRY;
- if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
- RETURN(0);
+ if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
+ RETURN(0);
- LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
- cfs_atomic_inc(&imp->imp_replay_inflight);
+ LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+ atomic_inc(&imp->imp_replay_inflight);
- req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
- OBD_PING);
- if (req == NULL) {
- cfs_atomic_dec(&imp->imp_replay_inflight);
- RETURN(-ENOMEM);
- }
+ req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
+ OBD_PING);
+ if (req == NULL) {
+ atomic_dec(&imp->imp_replay_inflight);
+ RETURN(-ENOMEM);
+ }
- ptlrpc_request_set_replen(req);
- req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
- lustre_msg_add_flags(req->rq_reqmsg,
- MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
- if (AT_OFF)
- req->rq_timeout *= 3;
- req->rq_interpret_reply = completed_replay_interpret;
+ ptlrpc_request_set_replen(req);
+ req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
+ lustre_msg_add_flags(req->rq_reqmsg,
+ MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
+ if (AT_OFF)
+ req->rq_timeout *= 3;
+ req->rq_interpret_reply = completed_replay_interpret;
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- RETURN(0);
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ RETURN(0);
}
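The error handling above follows the usual inflight-accounting shape; a
hedged sketch (alloc_request and dispatch are hypothetical placeholders):
increment before anything can complete, and make every exit path drop the
counter exactly once.

static int send_tracked(struct obd_import *imp)
{
	struct ptlrpc_request *req;

	atomic_inc(&imp->imp_replay_inflight);

	req = alloc_request(imp);		/* hypothetical helper */
	if (req == NULL) {
		/* failed before dispatch: undo our increment */
		atomic_dec(&imp->imp_replay_inflight);
		return -ENOMEM;
	}

	/* the interpret callback decrements on completion */
	req->rq_interpret_reply = completed_replay_interpret;
	dispatch(req);				/* hypothetical */
	return 0;
}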
#ifdef __KERNEL__
#endif
}
- if (imp->imp_state == LUSTRE_IMP_REPLAY) {
- CDEBUG(D_HA, "replay requested by %s\n",
- obd2cli_tgt(imp->imp_obd));
- rc = ptlrpc_replay_next(imp, &inflight);
- if (inflight == 0 &&
- cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
- rc = ldlm_replay_locks(imp);
- if (rc)
- GOTO(out, rc);
- }
- rc = 0;
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
- if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
- rc = signal_completed_replay(imp);
- if (rc)
- GOTO(out, rc);
- }
+ if (imp->imp_state == LUSTRE_IMP_REPLAY) {
+ CDEBUG(D_HA, "replay requested by %s\n",
+ obd2cli_tgt(imp->imp_obd));
+ rc = ptlrpc_replay_next(imp, &inflight);
+ if (inflight == 0 &&
+ atomic_read(&imp->imp_replay_inflight) == 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
+ rc = ldlm_replay_locks(imp);
+ if (rc)
+ GOTO(out, rc);
+ }
+ rc = 0;
+ }
- }
+ if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
+ if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
+ rc = signal_completed_replay(imp);
+ if (rc)
+ GOTO(out, rc);
+ }
+ }
- if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
- if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
- }
- }
+ if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
+ if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
+ }
+ }
if (imp->imp_state == LUSTRE_IMP_RECOVER) {
CDEBUG(D_HA, "reconnected to %s@%s\n",
ptlrpc_request_addref(request);
if (obd->obd_svc_stats != NULL)
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- cfs_atomic_read(&request->rq_import->imp_inflight));
+ atomic_read(&request->rq_import->imp_inflight));
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
policy->pol_state = NRS_POL_STATE_STOPPED;
- if (cfs_atomic_dec_and_test(&policy->pol_desc->pd_refs))
+ if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
module_put(policy->pol_desc->pd_owner);
EXIT;
* Increase the module usage count for policies registering from other
* modules.
*/
- if (cfs_atomic_inc_return(&policy->pol_desc->pd_refs) == 1 &&
+ if (atomic_inc_return(&policy->pol_desc->pd_refs) == 1 &&
!try_module_get(policy->pol_desc->pd_owner)) {
- cfs_atomic_dec(&policy->pol_desc->pd_refs);
+ atomic_dec(&policy->pol_desc->pd_refs);
CERROR("NRS: cannot get module for policy %s; is it alive?\n",
policy->pol_desc->pd_name);
RETURN(-ENODEV);
spin_lock(&nrs->nrs_lock);
if (rc != 0) {
- if (cfs_atomic_dec_and_test(&policy->pol_desc->pd_refs))
+ if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
module_put(policy->pol_desc->pd_owner);
policy->pol_state = NRS_POL_STATE_STOPPED;
if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0)
desc->pd_owner = conf->nc_owner;
desc->pd_flags = conf->nc_flags;
- cfs_atomic_set(&desc->pd_refs, 0);
+ atomic_set(&desc->pd_refs, 0);
/**
* For policies that are held in the same module as NRS (currently
struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- cfs_atomic_inc(&cli->cc_ref);
+ atomic_inc(&cli->cc_ref);
}
static void nrs_crrn_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- cfs_atomic_dec(&cli->cc_ref);
+ atomic_dec(&cli->cc_ref);
}
static void nrs_crrn_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- LASSERTF(cfs_atomic_read(&cli->cc_ref) == 0,
+ LASSERTF(atomic_read(&cli->cc_ref) == 0,
"Busy CRR-N object from client with NID %s, with %d refs\n",
- libcfs_nid2str(cli->cc_nid), cfs_atomic_read(&cli->cc_ref));
+ libcfs_nid2str(cli->cc_nid), atomic_read(&cli->cc_ref));
OBD_FREE_PTR(cli);
}
cli->cc_nid = req->rq_peer.nid;
- cfs_atomic_set(&cli->cc_ref, 1);
+ atomic_set(&cli->cc_ref, 1);
tmp = cfs_hash_findadd_unique(net->cn_cli_hash, &cli->cc_nid,
&cli->cc_hnode);
if (tmp != cli) {
static void nrs_tbf_rule_fini(struct nrs_tbf_rule *rule)
{
- LASSERT(cfs_atomic_read(&rule->tr_ref) == 0);
+ LASSERT(atomic_read(&rule->tr_ref) == 0);
LASSERT(cfs_list_empty(&rule->tr_cli_list));
LASSERT(cfs_list_empty(&rule->tr_linkage));
*/
static void nrs_tbf_rule_put(struct nrs_tbf_rule *rule)
{
- if (cfs_atomic_dec_and_test(&rule->tr_ref))
+ if (atomic_dec_and_test(&rule->tr_ref))
nrs_tbf_rule_fini(rule);
}
*/
static inline void nrs_tbf_rule_get(struct nrs_tbf_rule *rule)
{
- cfs_atomic_inc(&rule->tr_ref);
+ atomic_inc(&rule->tr_ref);
}
static void
cli->tc_depth = rule->tr_depth;
cli->tc_ntoken = rule->tr_depth;
cli->tc_check_time = ktime_to_ns(ktime_get());
- cli->tc_rule_sequence = cfs_atomic_read(&head->th_rule_sequence);
+ cli->tc_rule_sequence = atomic_read(&head->th_rule_sequence);
cli->tc_rule_generation = rule->tr_generation;
if (cli->tc_in_heap)
head->th_ops->o_cli_init(cli, req);
CFS_INIT_LIST_HEAD(&cli->tc_list);
CFS_INIT_LIST_HEAD(&cli->tc_linkage);
- cfs_atomic_set(&cli->tc_ref, 1);
+ atomic_set(&cli->tc_ref, 1);
rule = nrs_tbf_rule_match(head, cli);
nrs_tbf_cli_reset(head, rule, cli);
}
{
LASSERT(cfs_list_empty(&cli->tc_list));
LASSERT(!cli->tc_in_heap);
- LASSERT(cfs_atomic_read(&cli->tc_ref) == 0);
+ LASSERT(atomic_read(&cli->tc_ref) == 0);
nrs_tbf_cli_rule_put(cli);
OBD_FREE_PTR(cli);
}
rule->tr_rpc_rate = start->tc_rpc_rate;
rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
rule->tr_depth = tbf_depth;
- cfs_atomic_set(&rule->tr_ref, 1);
+ atomic_set(&rule->tr_ref, 1);
CFS_INIT_LIST_HEAD(&rule->tr_cli_list);
CFS_INIT_LIST_HEAD(&rule->tr_nids);
cfs_list_add(&rule->tr_linkage, &head->th_list);
rule->tr_head = head;
spin_unlock(&head->th_rule_lock);
- cfs_atomic_inc(&head->th_rule_sequence);
+ atomic_inc(&head->th_rule_sequence);
if (start->tc_rule_flags & NTRS_DEFAULT) {
rule->tr_flags |= NTRS_DEFAULT;
LASSERT(head->th_rule == NULL);
struct nrs_tbf_client,
tc_hnode);
- cfs_atomic_inc(&cli->tc_ref);
+ atomic_inc(&cli->tc_ref);
}
static void nrs_tbf_jobid_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_tbf_client,
tc_hnode);
- cfs_atomic_dec(&cli->tc_ref);
+ atomic_dec(&cli->tc_ref);
}
static void nrs_tbf_jobid_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_tbf_client,
tc_hnode);
- LASSERT(cfs_atomic_read(&cli->tc_ref) == 0);
+ LASSERT(atomic_read(&cli->tc_ref) == 0);
nrs_tbf_cli_fini(cli);
}
cli = cfs_list_entry(bkt->ntb_lru.next,
struct nrs_tbf_client,
tc_lru);
- LASSERT(cfs_atomic_read(&cli->tc_ref) == 0);
+ LASSERT(atomic_read(&cli->tc_ref) == 0);
cfs_hash_bd_del_locked(hs, &bd, &cli->tc_hnode);
cfs_list_move(&cli->tc_lru, &zombies);
}
rule->tr_name,
rule->tr_jobids_str,
rule->tr_rpc_rate,
- cfs_atomic_read(&rule->tr_ref) - 1);
+ atomic_read(&rule->tr_ref) - 1);
}
static int
struct nrs_tbf_client,
tc_hnode);
- cfs_atomic_inc(&cli->tc_ref);
+ atomic_inc(&cli->tc_ref);
}
static void nrs_tbf_nid_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_tbf_client,
tc_hnode);
- cfs_atomic_dec(&cli->tc_ref);
+ atomic_dec(&cli->tc_ref);
}
static void nrs_tbf_nid_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_tbf_client,
tc_hnode);
- LASSERTF(cfs_atomic_read(&cli->tc_ref) == 0,
+ LASSERTF(atomic_read(&cli->tc_ref) == 0,
"Busy TBF object from client with NID %s, with %d refs\n",
- libcfs_nid2str(cli->tc_nid), cfs_atomic_read(&cli->tc_ref));
+ libcfs_nid2str(cli->tc_nid), atomic_read(&cli->tc_ref));
nrs_tbf_cli_fini(cli);
}
rule->tr_name,
rule->tr_nids_str,
rule->tr_rpc_rate,
- cfs_atomic_read(&rule->tr_ref) - 1);
+ atomic_read(&rule->tr_ref) - 1);
}
static int
if (head->th_binheap == NULL)
GOTO(out_free_head, rc = -ENOMEM);
- cfs_atomic_set(&head->th_rule_sequence, 0);
+ atomic_set(&head->th_rule_sequence, 0);
spin_lock_init(&head->th_rule_lock);
CFS_INIT_LIST_HEAD(&head->th_list);
hrtimer_init(&head->th_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock(&policy->pol_nrs->nrs_svcpt->scp_req_lock);
LASSERT(cli->tc_rule);
if (cli->tc_rule_sequence !=
- cfs_atomic_read(&head->th_rule_sequence) ||
+ atomic_read(&head->th_rule_sequence) ||
cli->tc_rule->tr_flags & NTRS_STOPPING) {
struct nrs_tbf_rule *rule;
nrs_tbf_cli_init(head, cli, req);
tmp = head->th_ops->o_cli_findadd(head, cli);
if (tmp != cli) {
- cfs_atomic_dec(&cli->tc_ref);
+ atomic_dec(&cli->tc_ref);
nrs_tbf_cli_fini(cli);
cli = tmp;
}
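A hedged distillation of the generation-number scheme visible in the hunks
above (struct names are illustrative): the head keeps one atomic sequence
that is bumped whenever a rule is started; each client caches the value it
last matched against, and a mismatch means "lazily re-match me".

struct tbf_head   { atomic_t th_rule_sequence; };
struct tbf_client { __u32    tc_rule_sequence; };

static int cli_needs_rematch(struct tbf_head *h, struct tbf_client *c)
{
	/* cheap lock-free check: stale clients re-run rule matching */
	return c->tc_rule_sequence != atomic_read(&h->th_rule_sequence);
}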
if (rc)
RETURN(rc);
- rs = req->rq_reply_state;
- cfs_atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
- rs->rs_cb_id.cbid_fn = reply_out_callback;
- rs->rs_cb_id.cbid_arg = rs;
+ rs = req->rq_reply_state;
+ atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
+ rs->rs_cb_id.cbid_fn = reply_out_callback;
+ rs->rs_cb_id.cbid_arg = rs;
rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
- CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
- CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
- CFS_INIT_LIST_HEAD(&rs->rs_list);
+ CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
+ CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
+ CFS_INIT_LIST_HEAD(&rs->rs_list);
spin_lock_init(&rs->rs_lock);
req->rq_replen = msg_len;
void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
{
- PTLRPC_RS_DEBUG_LRU_DEL(rs);
+ PTLRPC_RS_DEBUG_LRU_DEL(rs);
- LASSERT (cfs_atomic_read(&rs->rs_refcount) == 0);
- LASSERT (!rs->rs_difficult || rs->rs_handled);
- LASSERT (!rs->rs_on_net);
- LASSERT (!rs->rs_scheduled);
- LASSERT (rs->rs_export == NULL);
- LASSERT (rs->rs_nlocks == 0);
- LASSERT (cfs_list_empty(&rs->rs_exp_list));
- LASSERT (cfs_list_empty(&rs->rs_obd_list));
+ LASSERT(atomic_read(&rs->rs_refcount) == 0);
+ LASSERT(!rs->rs_difficult || rs->rs_handled);
+ LASSERT(!rs->rs_on_net);
+ LASSERT(!rs->rs_scheduled);
+ LASSERT(rs->rs_export == NULL);
+ LASSERT(rs->rs_nlocks == 0);
+ LASSERT(cfs_list_empty(&rs->rs_exp_list));
+ LASSERT(cfs_list_empty(&rs->rs_obd_list));
- sptlrpc_svc_free_rs(rs);
+ sptlrpc_svc_free_rs(rs);
}
EXPORT_SYMBOL(lustre_free_reply_state);
else if (req->rq_export && req->rq_export->exp_connection)
nid = req->rq_export->exp_connection->c_peer.nid;
- va_start(args, fmt);
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x"LPU64"/t"LPD64"("LPD64") o%d->%s@%s:%d/%d"
- " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
- "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
- req, req->rq_xid, req->rq_transno,
- req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
- req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
- req->rq_import ?
- req->rq_import->imp_obd->obd_name :
- req->rq_export ?
- req->rq_export->exp_client_uuid.uuid :
- "<?>",
- libcfs_nid2str(nid),
- req->rq_request_portal, req->rq_reply_portal,
- req->rq_reqlen, req->rq_replen,
- req->rq_early_count, req->rq_timedout,
- req->rq_deadline,
- cfs_atomic_read(&req->rq_refcount),
- DEBUG_REQ_FLAGS(req),
- req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
- rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1,
- req->rq_status,
- rep_ok ? lustre_msg_get_status(req->rq_repmsg) : -1);
+ va_start(args, fmt);
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " req@%p x"LPU64"/t"LPD64"("LPD64") o%d->%s@%s:%d/%d"
+ " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
+ "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
+ req, req->rq_xid, req->rq_transno,
+ req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
+ req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
+ req->rq_import ?
+ req->rq_import->imp_obd->obd_name :
+ req->rq_export ?
+ req->rq_export->exp_client_uuid.uuid :
+ "<?>",
+ libcfs_nid2str(nid),
+ req->rq_request_portal, req->rq_reply_portal,
+ req->rq_reqlen, req->rq_replen,
+ req->rq_early_count, req->rq_timedout,
+ req->rq_deadline,
+ atomic_read(&req->rq_refcount),
+ DEBUG_REQ_FLAGS(req),
+ req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
+ rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1,
+ req->rq_status,
+ rep_ok ? lustre_msg_get_status(req->rq_repmsg) : -1);
va_end(args);
}
EXPORT_SYMBOL(_debug_req);
pd->pd_this_ping = curtime;
mutex_unlock(&pinger_mutex);
- /* Might be empty, that's OK. */
- if (cfs_atomic_read(&set->set_remaining) == 0)
- CDEBUG(D_RPCTRACE, "nothing to ping\n");
+ /* Might be empty, that's OK. */
+ if (atomic_read(&set->set_remaining) == 0)
+ CDEBUG(D_RPCTRACE, "nothing to ping\n");
cfs_list_for_each(iter, &set->set_requests) {
struct ptlrpc_request *req =
spin_lock(&imp->imp_lock);
if (!cfs_list_empty(&req->rq_list)) {
cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
+ atomic_dec(&imp->imp_inflight);
}
spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
+ atomic_dec(&set->set_remaining);
}
mutex_unlock(&pinger_mutex);
static inline int ll_rpc_recoverable_error(int rc)
{
- return (rc == -ENOTCONN || rc == -ENODEV);
+ return (rc == -ENOTCONN || rc == -ENODEV);
}
#if defined HAVE_SERVER_SUPPORT && defined(__KERNEL__)
static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
{
- if (cfs_atomic_dec_and_test(&set->set_refcount))
- OBD_FREE_PTR(set);
+ if (atomic_dec_and_test(&set->set_refcount))
+ OBD_FREE_PTR(set);
}
#endif /* PTLRPC_INTERNAL_H */
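For symmetry with the put above, the matching get appears later in this
patch; as a hedged reminder of the contract (helper names here are
illustrative): the embedded count starts at 1 in the allocator, every get
pairs with a put, and the final put frees.

static inline void reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}

static inline void reqset_put(struct ptlrpc_request_set *set)
{
	/* last reference out frees the set itself */
	if (atomic_dec_and_test(&set->set_refcount))
		OBD_FREE_PTR(set);
}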
cfs_list_entry(pos, struct ptlrpc_request,
rq_set_chain);
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
- req->rq_set = new;
- req->rq_queued_time = cfs_time_current();
+ req->rq_set = new;
+ req->rq_queued_time = cfs_time_current();
#else
- cfs_list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- cfs_atomic_dec(&set->set_remaining);
+ cfs_list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ atomic_dec(&set->set_remaining);
#endif
- }
+ }
#ifdef __KERNEL__
spin_lock(&new->set_new_req_lock);
cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
- i = cfs_atomic_read(&set->set_remaining);
- count = cfs_atomic_add_return(i, &new->set_new_count);
- cfs_atomic_set(&set->set_remaining, 0);
+ i = atomic_read(&set->set_remaining);
+ count = atomic_add_return(i, &new->set_new_count);
+ atomic_set(&set->set_remaining, 0);
spin_unlock(&new->set_new_req_lock);
if (count == i) {
wake_up(&new->set_waitq);
}
cfs_list_splice_init(&src->set_new_requests,
&des->set_requests);
- rc = cfs_atomic_read(&src->set_new_count);
- cfs_atomic_add(rc, &des->set_remaining);
- cfs_atomic_set(&src->set_new_count, 0);
- }
+ rc = atomic_read(&src->set_new_count);
+ atomic_add(rc, &des->set_remaining);
+ atomic_set(&src->set_new_count, 0);
+ }
spin_unlock(&src->set_new_req_lock);
return rc;
}
*/
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
- struct ptlrpcd_ctl *pc;
+ struct ptlrpcd_ctl *pc;
if (req->rq_reqmsg)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
spin_lock(&req->rq_lock);
- if (req->rq_invalid_rqset) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
- back_to_sleep, NULL);
+ if (req->rq_invalid_rqset) {
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
+ back_to_sleep, NULL);
- req->rq_invalid_rqset = 0;
+ req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
- } else if (req->rq_set) {
- /* If we have a vaid "rq_set", just reuse it to avoid double
- * linked. */
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
- LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
-
- /* ptlrpc_check_set will decrease the count */
- cfs_atomic_inc(&req->rq_set->set_remaining);
+ l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+ } else if (req->rq_set) {
+	} else if (req->rq_set) {
+		/* If we have a valid "rq_set", just reuse it to avoid
+		 * double linking. */
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
+
+ /* ptlrpc_check_set will decrease the count */
+ atomic_inc(&req->rq_set->set_remaining);
spin_unlock(&req->rq_lock);
wake_up(&req->rq_set->set_waitq);
return;
} else {
spin_unlock(&req->rq_lock);
- }
+ }
- pc = ptlrpcd_select_pc(req, policy, idx);
+ pc = ptlrpcd_select_pc(req, policy, idx);
- DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
- req, pc->pc_name, pc->pc_index);
+ DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
+ req, pc->pc_name, pc->pc_index);
- ptlrpc_set_add_new_req(pc, req);
+ ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
- cfs_atomic_inc(&set->set_refcount);
+ atomic_inc(&set->set_refcount);
}
/**
int rc2;
ENTRY;
- if (cfs_atomic_read(&set->set_new_count)) {
+ if (atomic_read(&set->set_new_count)) {
spin_lock(&set->set_new_req_lock);
- if (likely(!cfs_list_empty(&set->set_new_requests))) {
- cfs_list_splice_init(&set->set_new_requests,
- &set->set_requests);
- cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
- &set->set_remaining);
- cfs_atomic_set(&set->set_new_count, 0);
- /*
- * Need to calculate its timeout.
- */
- rc = 1;
- }
+ if (likely(!cfs_list_empty(&set->set_new_requests))) {
+ cfs_list_splice_init(&set->set_new_requests,
+ &set->set_requests);
+ atomic_add(atomic_read(&set->set_new_count),
+ &set->set_remaining);
+ atomic_set(&set->set_new_count, 0);
+ /*
+ * Need to calculate its timeout.
+ */
+ rc = 1;
+ }
spin_unlock(&set->set_new_req_lock);
- }
+ }
- /* We should call lu_env_refill() before handling new requests to make
- * sure that env key the requests depending on really exists.
- */
- rc2 = lu_env_refill(env);
- if (rc2 != 0) {
- /*
- * XXX This is very awkward situation, because
- * execution can neither continue (request
- * interpreters assume that env is set up), nor repeat
- * the loop (as this potentially results in a tight
- * loop of -ENOMEM's).
- *
- * Fortunately, refill only ever does something when
- * new modules are loaded, i.e., early during boot up.
- */
- CERROR("Failure to refill session: %d\n", rc2);
- RETURN(rc);
- }
+	/* We should call lu_env_refill() before handling new requests to make
+	 * sure that the env keys the requests depend on really exist.
+	 */
+ rc2 = lu_env_refill(env);
+ if (rc2 != 0) {
+ /*
+		 * XXX This is a very awkward situation, because
+ * execution can neither continue (request
+ * interpreters assume that env is set up), nor repeat
+ * the loop (as this potentially results in a tight
+ * loop of -ENOMEM's).
+ *
+ * Fortunately, refill only ever does something when
+ * new modules are loaded, i.e., early during boot up.
+ */
+ CERROR("Failure to refill session: %d\n", rc2);
+ RETURN(rc);
+ }
- if (cfs_atomic_read(&set->set_remaining))
- rc |= ptlrpc_check_set(env, set);
+ if (atomic_read(&set->set_remaining))
+ rc |= ptlrpc_check_set(env, set);
if (!cfs_list_empty(&set->set_requests)) {
/*
}
}
- if (rc == 0) {
- /*
- * If new requests have been added, make sure to wake up.
- */
- rc = cfs_atomic_read(&set->set_new_count);
+ if (rc == 0) {
+ /*
+ * If new requests have been added, make sure to wake up.
+ */
+ rc = atomic_read(&set->set_new_count);
#ifdef __KERNEL__
/* If we have nothing to do, check whether we can take some
ptlrpc_reqset_get(ps);
spin_unlock(&partner->pc_lock);
- if (cfs_atomic_read(&ps->set_new_count)) {
- rc = ptlrpcd_steal_rqset(set, ps);
- if (rc > 0)
- CDEBUG(D_RPCTRACE, "transfer %d"
- " async RPCs [%d->%d]\n",
- rc, partner->pc_index,
- pc->pc_index);
- }
- ptlrpc_reqset_put(ps);
- } while (rc == 0 && pc->pc_cursor != first);
- }
+ if (atomic_read(&ps->set_new_count)) {
+ rc = ptlrpcd_steal_rqset(set, ps);
+ if (rc > 0)
+ CDEBUG(D_RPCTRACE, "transfer %d"
+ " async RPCs [%d->%d]\n",
+ rc, partner->pc_index,
+ pc->pc_index);
+ }
+ ptlrpc_reqset_put(ps);
+ } while (rc == 0 && pc->pc_cursor != first);
+ }
#endif
- }
+ }
- RETURN(rc);
+ RETURN(rc);
}
#ifdef __KERNEL__
int ptlrpcd_idle(void *arg)
{
- struct ptlrpcd_ctl *pc = arg;
+ struct ptlrpcd_ctl *pc = arg;
- return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
- cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
+ return (atomic_read(&pc->pc_set->set_new_count) == 0 &&
+ atomic_read(&pc->pc_set->set_remaining) == 0);
}
#endif
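A note on the idle test above: both counters must drain before a ptlrpcd
thread may park -- set_new_count covers queued-but-unspliced requests and
set_remaining covers spliced-but-uncompleted ones. A hypothetical sketch of
how such a predicate is typically consumed (pc_stopping is an assumed flag):

static void ptlrpcd_park(struct ptlrpcd_ctl *pc)
{
	/* sleep until there is work again or we are asked to stop */
	wait_event(pc->pc_set->set_waitq,
		   !ptlrpcd_idle(pc) || pc->pc_stopping);
}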
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
- cfs_atomic_read(&imp->imp_inval_count))
+ atomic_read(&imp->imp_inval_count))
rc = -EINVAL;
spin_unlock(&imp->imp_lock);
if (rc)
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
static DEFINE_MUTEX(load_mutex);
- static cfs_atomic_t loaded = CFS_ATOMIC_INIT(0);
- struct ptlrpc_sec_policy *policy;
- __u16 number = SPTLRPC_FLVR_POLICY(flavor);
- __u16 flag = 0;
+ static atomic_t loaded = ATOMIC_INIT(0);
+ struct ptlrpc_sec_policy *policy;
+ __u16 number = SPTLRPC_FLVR_POLICY(flavor);
+ __u16 flag = 0;
- if (number >= SPTLRPC_POLICY_MAX)
- return NULL;
+ if (number >= SPTLRPC_POLICY_MAX)
+ return NULL;
- while (1) {
+ while (1) {
read_lock(&policy_lock);
policy = policies[number];
if (policy && !try_module_get(policy->sp_owner))
policy = NULL;
if (policy == NULL)
- flag = cfs_atomic_read(&loaded);
+ flag = atomic_read(&loaded);
read_unlock(&policy_lock);
- if (policy != NULL || flag != 0 ||
- number != SPTLRPC_POLICY_GSS)
- break;
+ if (policy != NULL || flag != 0 ||
+ number != SPTLRPC_POLICY_GSS)
+ break;
- /* try to load gss module, once */
+ /* try to load gss module, once */
mutex_lock(&load_mutex);
- if (cfs_atomic_read(&loaded) == 0) {
+ if (atomic_read(&loaded) == 0) {
if (request_module("ptlrpc_gss") == 0)
CDEBUG(D_SEC,
"module ptlrpc_gss loaded on demand\n");
- else
- CERROR("Unable to load module ptlrpc_gss\n");
+ else
+ CERROR("Unable to load module ptlrpc_gss\n");
- cfs_atomic_set(&loaded, 1);
- }
+ atomic_set(&loaded, 1);
+ }
mutex_unlock(&load_mutex);
- }
+ }
- return policy;
+ return policy;
}
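A hedged sketch of the load-once gating above, with the lookup and module
name left as placeholders: lock-free readers retry only until the atomic
flag flips, and the mutex serializes the single request_module() attempt.
Note the flag is set even when loading fails, so the loop cannot spin.

struct thing;				/* opaque, hypothetical */

static struct thing *lookup_or_load_once(void)
{
	static DEFINE_MUTEX(load_mutex);
	static atomic_t loaded = ATOMIC_INIT(0);
	struct thing *t;

	while ((t = try_lookup()) == NULL &&	/* hypothetical lookup */
	       atomic_read(&loaded) == 0) {
		mutex_lock(&load_mutex);
		if (atomic_read(&loaded) == 0) {
			request_module("some_module");
			atomic_set(&loaded, 1);	/* once, success or not */
		}
		mutex_unlock(&load_mutex);
	}
	return t;
}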
__u32 sptlrpc_name2flavor_base(const char *name)
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
- cfs_atomic_inc(&ctx->cc_refcount);
- return ctx;
+ atomic_inc(&ctx->cc_refcount);
+ return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
{
- struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct ptlrpc_sec *sec = ctx->cc_sec;
- LASSERT(sec);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+ LASSERT(sec);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
- if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
- return;
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return;
- sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
+ sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
RETURN(-ENOMEM);
spin_lock_init(&req->rq_lock);
- cfs_atomic_set(&req->rq_refcount, 10000);
+ atomic_set(&req->rq_refcount, 10000);
CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
init_waitqueue_head(&req->rq_reply_waitq);
init_waitqueue_head(&req->rq_set_waitq);
/*
* "fixed" sec (e.g. null) use sec_id < 0
*/
-static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
+static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
int sptlrpc_get_next_secid(void)
{
- return cfs_atomic_inc_return(&sptlrpc_sec_id);
+ return atomic_inc_return(&sptlrpc_sec_id);
}
EXPORT_SYMBOL(sptlrpc_get_next_secid);
struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
- if (sec)
- cfs_atomic_inc(&sec->ps_refcount);
+ if (sec)
+ atomic_inc(&sec->ps_refcount);
- return sec;
+ return sec;
}
EXPORT_SYMBOL(sptlrpc_sec_get);
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
- if (sec) {
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
+ if (sec) {
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
- if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
- sptlrpc_gc_del_sec(sec);
- sec_cop_destroy_sec(sec);
- }
- }
+ if (atomic_dec_and_test(&sec->ps_refcount)) {
+ sptlrpc_gc_del_sec(sec);
+ sec_cop_destroy_sec(sec);
+ }
+ }
}
EXPORT_SYMBOL(sptlrpc_sec_put);
}
}
- sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
- if (sec) {
- cfs_atomic_inc(&sec->ps_refcount);
+ sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
+ if (sec) {
+ atomic_inc(&sec->ps_refcount);
- sec->ps_part = sp;
+ sec->ps_part = sp;
- if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
- sptlrpc_gc_add_sec(sec);
- } else {
- sptlrpc_policy_put(policy);
- }
+ if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
+ sptlrpc_gc_add_sec(sec);
+ } else {
+ sptlrpc_policy_put(policy);
+ }
- RETURN(sec);
+ RETURN(sec);
}
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
{
- struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
+ struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
- if (ctx != NULL)
- cfs_atomic_inc(&ctx->sc_refcount);
+ if (ctx != NULL)
+ atomic_inc(&ctx->sc_refcount);
}
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
{
- struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
+ struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
- if (ctx == NULL)
- return;
+ if (ctx == NULL)
+ return;
- LASSERT_ATOMIC_POS(&ctx->sc_refcount);
- if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
- if (ctx->sc_policy->sp_sops->free_ctx)
- ctx->sc_policy->sp_sops->free_ctx(ctx);
- }
- req->rq_svc_ctx = NULL;
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
+ if (atomic_dec_and_test(&ctx->sc_refcount)) {
+ if (ctx->sc_policy->sp_sops->free_ctx)
+ ctx->sc_policy->sp_sops->free_ctx(ctx);
+ }
+ req->rq_svc_ctx = NULL;
}
void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
static spinlock_t sec_gc_ctx_list_lock;
static struct ptlrpc_thread sec_gc_thread;
-static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
+static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
might_sleep();
/* signal before list_del to make iteration in gc thread safe */
- cfs_atomic_inc(&sec_gc_wait_del);
+ atomic_inc(&sec_gc_wait_del);
spin_lock(&sec_gc_list_lock);
cfs_list_del_init(&sec->ps_gc_list);
mutex_lock(&sec_gc_mutex);
mutex_unlock(&sec_gc_mutex);
- cfs_atomic_dec(&sec_gc_wait_del);
+ atomic_dec(&sec_gc_wait_del);
CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
spin_unlock(&sec_gc_ctx_list_lock);
LASSERT(ctx->cc_sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 1);
CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
sptlrpc_cli_ctx_put(ctx, 1);
cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
/* if someone is waiting to be deleted, let it
* proceed as soon as possible. */
- if (cfs_atomic_read(&sec_gc_wait_del)) {
+ if (atomic_read(&sec_gc_wait_del)) {
CDEBUG(D_SEC, "deletion pending, start over\n");
mutex_unlock(&sec_gc_mutex);
goto again;
sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));
- seq_printf(seq, "rpc flavor: %s\n",
- sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
- seq_printf(seq, "bulk flavor: %s\n",
- sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
- seq_printf(seq, "flags: %s\n",
- sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
- seq_printf(seq, "id: %d\n", sec->ps_id);
- seq_printf(seq, "refcount: %d\n",
- cfs_atomic_read(&sec->ps_refcount));
- seq_printf(seq, "nctx: %d\n", cfs_atomic_read(&sec->ps_nctx));
- seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval);
- seq_printf(seq, "gc next %ld\n",
- sec->ps_gc_interval ?
- sec->ps_gc_next - cfs_time_current_sec() : 0);
-
- sptlrpc_sec_put(sec);
+ seq_printf(seq, "rpc flavor: %s\n",
+ sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
+ seq_printf(seq, "bulk flavor: %s\n",
+ sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
+ seq_printf(seq, "flags: %s\n",
+ sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
+ seq_printf(seq, "id: %d\n", sec->ps_id);
+ seq_printf(seq, "refcount: %d\n",
+ atomic_read(&sec->ps_refcount));
+ seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
+ seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval);
+ seq_printf(seq, "gc next %ld\n",
+ sec->ps_gc_interval ?
+ sec->ps_gc_next - cfs_time_current_sec() : 0);
+
+ sptlrpc_sec_put(sec);
out:
return 0;
}
struct vfs_cred *vcred,
int create, int remove_dead)
{
- cfs_atomic_inc(&null_cli_ctx.cc_refcount);
- return &null_cli_ctx;
+ atomic_inc(&null_cli_ctx.cc_refcount);
+ return &null_cli_ctx;
}
static
}
static struct ptlrpc_svc_ctx null_svc_ctx = {
- .sc_refcount = CFS_ATOMIC_INIT(1),
+ .sc_refcount = ATOMIC_INIT(1),
.sc_policy = &null_policy,
};
static
int null_accept(struct ptlrpc_request *req)
{
- LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
- SPTLRPC_POLICY_NULL);
+ LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+ SPTLRPC_POLICY_NULL);
- if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
- CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
+ if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
+ CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
+ return SECSVC_DROP;
+ }
- req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
+ req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
- req->rq_reqmsg = req->rq_reqbuf;
- req->rq_reqlen = req->rq_reqdata_len;
+ req->rq_reqmsg = req->rq_reqbuf;
+ req->rq_reqlen = req->rq_reqdata_len;
- req->rq_svc_ctx = &null_svc_ctx;
- cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ req->rq_svc_ctx = &null_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
- return SECSVC_OK;
+ return SECSVC_OK;
}
static
rs->rs_size = rs_size;
}
- rs->rs_svc_ctx = req->rq_svc_ctx;
- cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ rs->rs_svc_ctx = req->rq_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
- rs->rs_msg = rs->rs_repbuf;
+ rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf_len = rs_size - sizeof(*rs);
+ rs->rs_msg = rs->rs_repbuf;
- req->rq_reply_state = rs;
- return 0;
+ req->rq_reply_state = rs;
+ return 0;
}
static
void null_free_rs(struct ptlrpc_reply_state *rs)
{
- LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
- cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+ LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
+ atomic_dec(&rs->rs_svc_ctx->sc_refcount);
- if (!rs->rs_prealloc)
- OBD_FREE_LARGE(rs, rs->rs_size);
+ if (!rs->rs_prealloc)
+ OBD_FREE_LARGE(rs, rs->rs_size);
}
static
static void null_init_internal(void)
{
- static CFS_HLIST_HEAD(__list);
-
- null_sec.ps_policy = &null_policy;
- cfs_atomic_set(&null_sec.ps_refcount, 1); /* always busy */
- null_sec.ps_id = -1;
- null_sec.ps_import = NULL;
- null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
- null_sec.ps_flvr.sf_flags = 0;
- null_sec.ps_part = LUSTRE_SP_ANY;
- null_sec.ps_dying = 0;
+ static CFS_HLIST_HEAD(__list);
+
+ null_sec.ps_policy = &null_policy;
+ atomic_set(&null_sec.ps_refcount, 1); /* always busy */
+ null_sec.ps_id = -1;
+ null_sec.ps_import = NULL;
+ null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
+ null_sec.ps_flvr.sf_flags = 0;
+ null_sec.ps_part = LUSTRE_SP_ANY;
+ null_sec.ps_dying = 0;
spin_lock_init(&null_sec.ps_lock);
- cfs_atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
- CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
- null_sec.ps_gc_interval = 0;
- null_sec.ps_gc_next = 0;
-
- cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
- cfs_atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
- null_cli_ctx.cc_sec = &null_sec;
- null_cli_ctx.cc_ops = &null_ctx_ops;
- null_cli_ctx.cc_expire = 0;
- null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
- PTLRPC_CTX_UPTODATE;
- null_cli_ctx.cc_vcred.vc_uid = 0;
+ atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
+ CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
+ null_sec.ps_gc_interval = 0;
+ null_sec.ps_gc_next = 0;
+
+ cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
+ atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
+ null_cli_ctx.cc_sec = &null_sec;
+ null_cli_ctx.cc_ops = &null_ctx_ops;
+ null_cli_ctx.cc_expire = 0;
+ null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
+ PTLRPC_CTX_UPTODATE;
+ null_cli_ctx.cc_vcred.vc_uid = 0;
spin_lock_init(&null_cli_ctx.cc_lock);
CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
write_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- if (ctx) {
- cfs_atomic_inc(&ctx->cc_refcount);
-
- if (ctx_new)
- OBD_FREE_PTR(ctx_new);
- } else if (ctx_new) {
- ctx = ctx_new;
-
- cfs_atomic_set(&ctx->cc_refcount, 1); /* for cache */
- ctx->cc_sec = &plsec->pls_base;
- ctx->cc_ops = &plain_ctx_ops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
- ctx->cc_vcred.vc_uid = 0;
+ ctx = plsec->pls_ctx;
+ if (ctx) {
+ atomic_inc(&ctx->cc_refcount);
+
+ if (ctx_new)
+ OBD_FREE_PTR(ctx_new);
+ } else if (ctx_new) {
+ ctx = ctx_new;
+
+ atomic_set(&ctx->cc_refcount, 1); /* for cache */
+ ctx->cc_sec = &plsec->pls_base;
+ ctx->cc_ops = &plain_ctx_ops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
+ ctx->cc_vcred.vc_uid = 0;
spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
+ CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
- plsec->pls_ctx = ctx;
- cfs_atomic_inc(&plsec->pls_base.ps_nctx);
- cfs_atomic_inc(&plsec->pls_base.ps_refcount);
+ plsec->pls_ctx = ctx;
+ atomic_inc(&plsec->pls_base.ps_nctx);
+ atomic_inc(&plsec->pls_base.ps_refcount);
- cfs_atomic_inc(&ctx->cc_refcount); /* for caller */
- }
+ atomic_inc(&ctx->cc_refcount); /* for caller */
+ }
write_unlock(&plsec->pls_lock);
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
- struct plain_sec *plsec = sec2plsec(sec);
- ENTRY;
+ struct plain_sec *plsec = sec2plsec(sec);
+ ENTRY;
- LASSERT(sec->ps_policy == &plain_policy);
- LASSERT(sec->ps_import);
- LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
- LASSERT(plsec->pls_ctx == NULL);
+ LASSERT(sec->ps_policy == &plain_policy);
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(plsec->pls_ctx == NULL);
- class_import_put(sec->ps_import);
+ class_import_put(sec->ps_import);
- OBD_FREE_PTR(plsec);
- EXIT;
+ OBD_FREE_PTR(plsec);
+ EXIT;
}
static
sec = &plsec->pls_base;
sec->ps_policy = &plain_policy;
- cfs_atomic_set(&sec->ps_refcount, 0);
- cfs_atomic_set(&sec->ps_nctx, 0);
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_import = class_import_get(imp);
sec->ps_flvr = *sf;
read_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
if (ctx)
- cfs_atomic_inc(&ctx->cc_refcount);
+ atomic_inc(&ctx->cc_refcount);
read_unlock(&plsec->pls_lock);
if (unlikely(ctx == NULL))
void plain_release_ctx(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx, int sync)
{
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(ctx->cc_sec == sec);
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
- OBD_FREE_PTR(ctx);
+ OBD_FREE_PTR(ctx);
- cfs_atomic_dec(&sec->ps_nctx);
- sptlrpc_sec_put(sec);
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
}
static
****************************************/
static struct ptlrpc_svc_ctx plain_svc_ctx = {
- .sc_refcount = CFS_ATOMIC_INIT(1),
+ .sc_refcount = ATOMIC_INIT(1),
.sc_policy = &plain_policy,
};
req->rq_pack_bulk = 1;
}
- req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
- req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
+ req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
+ req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
- req->rq_svc_ctx = &plain_svc_ctx;
- cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ req->rq_svc_ctx = &plain_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
- RETURN(SECSVC_OK);
+ RETURN(SECSVC_OK);
}
static
rs->rs_size = rs_size;
}
- rs->rs_svc_ctx = req->rq_svc_ctx;
- cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
+ rs->rs_svc_ctx = req->rq_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf_len = rs_size - sizeof(*rs);
- lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
+ lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
+ rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
- req->rq_reply_state = rs;
- RETURN(0);
+ req->rq_reply_state = rs;
+ RETURN(0);
}
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
- ENTRY;
+ ENTRY;
- LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
- cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+ LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+ atomic_dec(&rs->rs_svc_ctx->sc_refcount);
- if (!rs->rs_prealloc)
- OBD_FREE_LARGE(rs, rs->rs_size);
- EXIT;
+ if (!rs->rs_prealloc)
+ OBD_FREE_LARGE(rs, rs->rs_size);
+ EXIT;
}
static
struct ptlrpc_hr_partition {
/* # of started threads */
- cfs_atomic_t hrp_nstarted;
+ atomic_t hrp_nstarted;
/* # of stopped threads */
- cfs_atomic_t hrp_nstopped;
+ atomic_t hrp_nstopped;
/* cpu partition id */
int hrp_cpt;
/* round-robin rotor for choosing thread */
#endif
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
init_waitqueue_head(&svcpt->scp_rep_waitq);
- cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
+ atomic_set(&svcpt->scp_nreps_difficult, 0);
/* adaptive timeout */
spin_lock_init(&svcpt->scp_at_lock);
*/
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
- LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(atomic_read(&req->rq_refcount) == 0);
+ LASSERT(cfs_list_empty(&req->rq_timed_list));
- /* DEBUG_REQ() assumes the reply state of a request with a valid
- * ref will not be destroyed until that reference is dropped. */
- ptlrpc_req_drop_rs(req);
+ /* DEBUG_REQ() assumes the reply state of a request with a valid
+ * ref will not be destroyed until that reference is dropped. */
+ ptlrpc_req_drop_rs(req);
- sptlrpc_svc_ctx_decref(req);
+ sptlrpc_svc_ctx_decref(req);
- if (req != &req->rq_rqbd->rqbd_req) {
+ if (req != &req->rq_rqbd->rqbd_req) {
/* NB request buffers use an embedded
* req if the incoming req unlinked the
* MD; this isn't one of them! */
cfs_list_t *tmp;
cfs_list_t *nxt;
- if (!cfs_atomic_dec_and_test(&req->rq_refcount))
+ if (!atomic_dec_and_test(&req->rq_refcount))
return;
if (req->rq_session.lc_state == LCS_ENTERED) {
* now all reqs including the embedded req has been
* disposed, schedule request buffer for re-use.
*/
- LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
+ LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
0);
cfs_list_add_tail(&rqbd->rqbd_list,
&svcpt->scp_rqbd_idle);
reqcopy->rq_reqmsg = reqmsg;
memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
- LASSERT(cfs_atomic_read(&req->rq_refcount));
- /** if it is last refcount then early reply isn't needed */
- if (cfs_atomic_read(&req->rq_refcount) == 1) {
- DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
- "abort sending early reply\n");
- GOTO(out, rc = -EINVAL);
- }
+ LASSERT(atomic_read(&req->rq_refcount));
+ /** if it is last refcount then early reply isn't needed */
+ if (atomic_read(&req->rq_refcount) == 1) {
+ DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
+ "abort sending early reply\n");
+ GOTO(out, rc = -EINVAL);
+ }
/* Connection ref */
reqcopy->rq_export = class_conn2export(
* refcount to 0 already. Let's check this and
* don't add entry to work_list
*/
- if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
+ if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
cfs_list_add(&rq->rq_timed_list, &work_list);
counter++;
- }
+ }
- if (++index >= array->paa_size)
- index = 0;
- }
- array->paa_deadline = deadline;
+ if (++index >= array->paa_size)
+ index = 0;
+ }
+ array->paa_deadline = deadline;
/* we have a new earliest deadline, restart the timer */
ptlrpc_at_set_timer(svcpt);
struct ptlrpc_request *tmp = NULL;
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
- (cfs_atomic_read(&req->rq_export->exp_rpc_count) == 0))
+ (atomic_read(&req->rq_export->exp_rpc_count) == 0))
return 0;
/* bulk request are aborted upon reconnect, don't try to
spin_lock(&svcpt->scp_req_lock);
#ifndef __KERNEL__
/* !@%$# liblustre only has 1 thread */
- if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
+ if (atomic_read(&svcpt->scp_nreps_difficult) != 0) {
spin_unlock(&svcpt->scp_req_lock);
RETURN(NULL);
}
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+ atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg));
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+ atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg),
request->rq_xid,
libcfs_id2str(request->rq_peer),
/* Off the net */
spin_unlock(&rs->rs_lock);
- class_export_put (exp);
- rs->rs_export = NULL;
- ptlrpc_rs_decref (rs);
- if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
+ class_export_put (exp);
+ rs->rs_export = NULL;
+ ptlrpc_rs_decref(rs);
+ if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
wake_up_all(&svcpt->scp_waitq);
RETURN(1);
threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
}
- cfs_atomic_inc(&hrp->hrp_nstarted);
+ atomic_inc(&hrp->hrp_nstarted);
wake_up(&ptlrpc_hr.hr_waitq);
while (!ptlrpc_hr.hr_stopping) {
}
}
- cfs_atomic_inc(&hrp->hrp_nstopped);
+ atomic_inc(&hrp->hrp_nstopped);
wake_up(&ptlrpc_hr.hr_waitq);
return 0;
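The start/stop counters above form a simple thread barrier; a hedged sketch
of the waiting side, mirroring the wait_event calls nearby: each handler
thread bumps hrp_nstarted on entry and hrp_nstopped on exit, waking
hr_waitq, and the controller waits until the two counts agree.

static void wait_handlers_stopped(struct ptlrpc_hr_partition *hrp)
{
	wait_event(ptlrpc_hr.hr_waitq,
		   atomic_read(&hrp->hrp_nstopped) ==
		   atomic_read(&hrp->hrp_nstarted));
}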
if (hrp->hrp_thrs == NULL)
continue; /* uninitialized */
wait_event(ptlrpc_hr.hr_waitq,
- cfs_atomic_read(&hrp->hrp_nstopped) ==
- cfs_atomic_read(&hrp->hrp_nstarted));
+ atomic_read(&hrp->hrp_nstopped) ==
+ atomic_read(&hrp->hrp_nstarted));
}
}
break;
}
wait_event(ptlrpc_hr.hr_waitq,
- cfs_atomic_read(&hrp->hrp_nstarted) == j);
+ atomic_read(&hrp->hrp_nstarted) == j);
if (!IS_ERR_VALUE(rc))
continue;
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
hrp->hrp_cpt = i;
- cfs_atomic_set(&hrp->hrp_nstarted, 0);
- cfs_atomic_set(&hrp->hrp_nstopped, 0);
+ atomic_set(&hrp->hrp_nstarted, 0);
+ atomic_set(&hrp->hrp_nstopped, 0);
hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0);
NULL, NULL);
rc = l_wait_event(svcpt->scp_waitq,
- cfs_atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
+ atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
if (rc == 0)
break;
CWARN("Unexpectedly long timeout %s %p\n",
struct lquota_site *lqe_site;
/* reference counter */
- cfs_atomic_t lqe_ref;
+ atomic_t lqe_ref;
/* linked to list of lqes which:
* - need quota space adjustment on slave
static inline void lqe_getref(struct lquota_entry *lqe)
{
LASSERT(lqe != NULL);
- cfs_atomic_inc(&lqe->lqe_ref);
+ atomic_inc(&lqe->lqe_ref);
}
static inline void lqe_putref(struct lquota_entry *lqe)
__u32 qpi_key;
/* track users of this pool instance */
- cfs_atomic_t qpi_ref;
+ atomic_t qpi_ref;
/* back pointer to master target
* immutable after creation. */
*/
static inline void qpi_getref(struct qmt_pool_info *pool)
{
- cfs_atomic_inc(&pool->qpi_ref);
+ atomic_inc(&pool->qpi_ref);
}
static inline void qpi_putref(const struct lu_env *env,
struct qmt_pool_info *pool)
{
LASSERT(atomic_read(&pool->qpi_ref) > 0);
- if (cfs_atomic_dec_and_test(&pool->qpi_ref))
+ if (atomic_dec_and_test(&pool->qpi_ref))
qmt_pool_free(env, pool);
}
static inline void qpi_putref_locked(struct qmt_pool_info *pool)
{
- LASSERT(cfs_atomic_read(&pool->qpi_ref) > 1);
- cfs_atomic_dec(&pool->qpi_ref);
+ LASSERT(atomic_read(&pool->qpi_ref) > 1);
+ atomic_dec(&pool->qpi_ref);
}
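The contrast between the two puts above is deliberate; a hedged restatement
with generic names (struct obj, obj_free are hypothetical): the plain put
may free the object, while the _locked variant relies on the caller holding
an extra reference, so the count provably cannot reach zero under the lock.

static inline void obj_putref(struct obj *o)
{
	if (atomic_dec_and_test(&o->o_ref))
		obj_free(o);		/* may take locks / sleep */
}

static inline void obj_putref_locked(struct obj *o)
{
	/* caller owns another ref: never the final drop */
	LASSERT(atomic_read(&o->o_ref) > 1);
	atomic_dec(&o->o_ref);
}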
/*
" least qunit: %lu\n",
pool->qpi_key & 0x0000ffff,
RES_NAME(pool->qpi_key >> 16),
- cfs_atomic_read(&pool->qpi_ref),
+ atomic_read(&pool->qpi_ref),
pool->qpi_least_qunit);
for (type = 0; type < MAXQUOTAS; type++)
" #lqe: %d\n",
QTYPE_NAME(type),
pool->qpi_slv_nr[type],
- cfs_atomic_read(&pool->qpi_site[type]->lqs_hash->hs_count));
+ atomic_read(&pool->qpi_site[type]->lqs_hash->hs_count));
return 0;
}
*/
struct qsd_qtype_info {
/* reference count incremented by each user of this structure */
- cfs_atomic_t qqi_ref;
+ atomic_t qqi_ref;
/* quota type, either USRQUOTA or GRPQUOTA
* immutable after creation. */
/* qqi_getref/putref is used to track users of a qqi structure */
static inline void qqi_getref(struct qsd_qtype_info *qqi)
{
- cfs_atomic_inc(&qqi->qqi_ref);
+ atomic_inc(&qqi->qqi_ref);
}
static inline void qqi_putref(struct qsd_qtype_info *qqi)
{
- LASSERT(cfs_atomic_read(&qqi->qqi_ref) > 0);
- cfs_atomic_dec(&qqi->qqi_ref);
+ LASSERT(atomic_read(&qqi->qqi_ref) > 0);
+ atomic_dec(&qqi->qqi_ref);
}
#define QSD_RES_TYPE(qsd) ((qsd)->qsd_is_md ? LQUOTA_RES_MD : LQUOTA_RES_DT)
* qsd_glb_blocking_ast() might haven't been called yet when we
* get here.
*/
- while (cfs_atomic_read(&qqi->qqi_ref) > 1) {
+ while (atomic_read(&qqi->qqi_ref) > 1) {
CDEBUG(D_QUOTA, "qqi reference count %u, repeat: %d\n",
- cfs_atomic_read(&qqi->qqi_ref), repeat);
+ atomic_read(&qqi->qqi_ref), repeat);
repeat++;
schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
cfs_time_seconds(1));
}
/* by now, all qqi users should have gone away */
- LASSERT(cfs_atomic_read(&qqi->qqi_ref) == 1);
+ LASSERT(atomic_read(&qqi->qqi_ref) == 1);
lu_ref_fini(&qqi->qqi_reference);
/* release accounting object */
if (qqi == NULL)
RETURN(-ENOMEM);
qsd->qsd_type_array[qtype] = qqi;
- cfs_atomic_set(&qqi->qqi_ref, 1); /* referenced from qsd */
+ atomic_set(&qqi->qqi_ref, 1); /* referenced from qsd */
/* set backpointer and other parameters */
qqi->qqi_qsd = qsd;