*/
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- cfs_list_t *l, *tmp;
+ struct list_head *l, *tmp;
struct ptlrpc_request *req;
LASSERT(pool != NULL);
spin_lock(&pool->prp_lock);
- cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
+ list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = list_entry(l, struct ptlrpc_request, rq_list);
+ list_del(&req->rq_list);
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
while (size < pool->prp_rq_size)
size <<= 1;
- LASSERTF(cfs_list_empty(&pool->prp_req_list) ||
+ LASSERTF(list_empty(&pool->prp_req_list) ||
size == pool->prp_rq_size,
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
req->rq_reqbuf_len = size;
req->rq_pool = pool;
spin_lock(&pool->prp_lock);
- cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
+ list_add_tail(&req->rq_list, &pool->prp_req_list);
}
spin_unlock(&pool->prp_lock);
return;
*/
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
- void (*populate_pool)(struct ptlrpc_request_pool *, int))
+ void (*populate_pool)(struct ptlrpc_request_pool *, int))
{
- struct ptlrpc_request_pool *pool;
+ struct ptlrpc_request_pool *pool;
- OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
- if (!pool)
- return NULL;
+ OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
+ if (!pool)
+ return NULL;
- /* Request next power of two for the allocation, because internally
- kernel would do exactly this */
+ /* Request next power of two for the allocation, because internally
+ kernel would do exactly this */
spin_lock_init(&pool->prp_lock);
- CFS_INIT_LIST_HEAD(&pool->prp_req_list);
- pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
- pool->prp_populate = populate_pool;
+ INIT_LIST_HEAD(&pool->prp_req_list);
+ pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
+ pool->prp_populate = populate_pool;
- populate_pool(pool, num_rq);
+ populate_pool(pool, num_rq);
- if (cfs_list_empty(&pool->prp_req_list)) {
- /* have not allocated a single request for the pool */
- OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
- pool = NULL;
- }
- return pool;
+ if (list_empty(&pool->prp_req_list)) {
+ /* have not allocated a single request for the pool */
+ OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
+ pool = NULL;
+ }
+ return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
* in writeout path, where this matters, this is safe to do, because
* nothing is lost in this case, and when some in-flight requests
* complete, this code will be called again. */
- if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ if (unlikely(list_empty(&pool->prp_req_list))) {
spin_unlock(&pool->prp_lock);
return NULL;
}
- request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- cfs_list_del_init(&request->rq_list);
+ request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ list_del_init(&request->rq_list);
spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
struct ptlrpc_request_pool *pool = request->rq_pool;
spin_lock(&pool->prp_lock);
- LASSERT(cfs_list_empty(&request->rq_list));
+ LASSERT(list_empty(&request->rq_list));
LASSERT(!request->rq_receiving_reply);
- cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ list_add_tail(&request->rq_list, &pool->prp_req_list);
spin_unlock(&pool->prp_lock);
}
ptlrpc_at_set_req_timeout(request);
spin_lock_init(&request->rq_lock);
- CFS_INIT_LIST_HEAD(&request->rq_list);
- CFS_INIT_LIST_HEAD(&request->rq_timed_list);
- CFS_INIT_LIST_HEAD(&request->rq_replay_list);
- CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
- CFS_INIT_LIST_HEAD(&request->rq_set_chain);
- CFS_INIT_LIST_HEAD(&request->rq_history_list);
- CFS_INIT_LIST_HEAD(&request->rq_exp_list);
+ INIT_LIST_HEAD(&request->rq_list);
+ INIT_LIST_HEAD(&request->rq_timed_list);
+ INIT_LIST_HEAD(&request->rq_replay_list);
+ INIT_LIST_HEAD(&request->rq_ctx_chain);
+ INIT_LIST_HEAD(&request->rq_set_chain);
+ INIT_LIST_HEAD(&request->rq_history_list);
+ INIT_LIST_HEAD(&request->rq_exp_list);
init_waitqueue_head(&request->rq_reply_waitq);
init_waitqueue_head(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
if (!set)
RETURN(NULL);
atomic_set(&set->set_refcount, 1);
- CFS_INIT_LIST_HEAD(&set->set_requests);
+ INIT_LIST_HEAD(&set->set_requests);
init_waitqueue_head(&set->set_waitq);
atomic_set(&set->set_new_count, 0);
atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
- CFS_INIT_LIST_HEAD(&set->set_new_requests);
- CFS_INIT_LIST_HEAD(&set->set_cblist);
+ INIT_LIST_HEAD(&set->set_new_requests);
+ INIT_LIST_HEAD(&set->set_cblist);
set->set_max_inflight = UINT_MAX;
set->set_producer = NULL;
set->set_producer_arg = NULL;
*/
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
- cfs_list_t *next;
- int expected_phase;
- int n = 0;
- ENTRY;
+ struct list_head *tmp;
+ struct list_head *next;
+ int expected_phase;
+ int n = 0;
+ ENTRY;
- /* Requests on the set should either all be completed, or all be new */
+ /* Requests on the set should either all be completed, or all be new */
expected_phase = (atomic_read(&set->set_remaining) == 0) ?
- RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- cfs_list_for_each (tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
-
- LASSERT(req->rq_phase == expected_phase);
- n++;
- }
+ RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+
+ LASSERT(req->rq_phase == expected_phase);
+ n++;
+ }
LASSERTF(atomic_read(&set->set_remaining) == 0 ||
atomic_read(&set->set_remaining) == n, "%d / %d\n",
atomic_read(&set->set_remaining), n);
- cfs_list_for_each_safe(tmp, next, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- cfs_list_del_init(&req->rq_set_chain);
+ list_for_each_safe(tmp, next, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ list_del_init(&req->rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data)
{
- struct ptlrpc_set_cbdata *cbdata;
+ struct ptlrpc_set_cbdata *cbdata;
- OBD_ALLOC_PTR(cbdata);
- if (cbdata == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC_PTR(cbdata);
+ if (cbdata == NULL)
+ RETURN(-ENOMEM);
- cbdata->psc_interpret = fn;
- cbdata->psc_data = data;
- cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist);
+ cbdata->psc_interpret = fn;
+ cbdata->psc_data = data;
+ list_add_tail(&cbdata->psc_item, &set->set_cblist);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_set_add_cb);
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
- LASSERT(cfs_list_empty(&req->rq_set_chain));
+ LASSERT(list_empty(&req->rq_set_chain));
/* The set takes over the caller's request reference */
- cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
+ list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
atomic_inc(&set->set_remaining);
req->rq_queued_time = cfs_time_current();
*/
req->rq_set = set;
req->rq_queued_time = cfs_time_current();
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ list_add_tail(&req->rq_set_chain, &set->set_new_requests);
count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
ptlrpc_free_committed(imp);
- if (!cfs_list_empty(&imp->imp_replay_list)) {
+ if (!list_empty(&imp->imp_replay_list)) {
struct ptlrpc_request *last;
- last = cfs_list_entry(imp->imp_replay_list.prev,
- struct ptlrpc_request,
- rq_replay_list);
+ last = list_entry(imp->imp_replay_list.prev,
+ struct ptlrpc_request,
+ rq_replay_list);
/*
* Requests with rq_replay stay on the list even if no
* commit is expected.
"(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_delayed_list);
atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
RETURN(0);
RETURN(rc);
}
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_sending_list);
atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
*/
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp, *next;
+ struct list_head *tmp, *next;
int force_timer_recalc = 0;
ENTRY;
if (atomic_read(&set->set_remaining) == 0)
RETURN(1);
- cfs_list_for_each_safe(tmp, next, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- struct obd_import *imp = req->rq_import;
- int unregistered = 0;
- int rc = 0;
+ list_for_each_safe(tmp, next, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ struct obd_import *imp = req->rq_import;
+ int unregistered = 0;
+ int rc = 0;
/* This schedule point is mainly for the ptlrpcd caller of this
* function. Most ptlrpc sets are not long-lived and unbounded
if (ptlrpc_import_delay_req(imp, req, &status)){
/* put on delay list - only if we wait
* recovery finished - before send */
-				cfs_list_del_init(&req->rq_list);
-				cfs_list_add_tail(&req->rq_list,
-						  &imp->
-						  imp_delayed_list);
+				list_del_init(&req->rq_list);
+				list_add_tail(&req->rq_list,
+					      &imp->imp_delayed_list);
spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
-			cfs_list_del_init(&req->rq_list);
-			cfs_list_add_tail(&req->rq_list,
-					  &imp->imp_sending_list);
+			list_del_init(&req->rq_list);
+			list_add_tail(&req->rq_list,
+				      &imp->imp_sending_list);
spin_unlock(&imp->imp_lock);
* may happen in the case of marking it erroneous for the case
* ptlrpc_import_delay_req(req, status) find it impossible to
* allow sending this rpc and returns *status != 0. */
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
atomic_dec(&imp->imp_inflight);
}
spin_unlock(&imp->imp_lock);
/* free the request that has just been completed
* in order not to pollute set->set_requests */
- cfs_list_del_init(&req->rq_set_chain);
+ list_del_init(&req->rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_set = NULL;
req->rq_invalid_rqset = 0;
*/
int ptlrpc_expired_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- cfs_list_t *tmp;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+ time_t now = cfs_time_current_sec();
+ ENTRY;
- LASSERT(set != NULL);
+ LASSERT(set != NULL);
- /*
- * A timeout expired. See which reqs it applies to...
- */
- cfs_list_for_each (tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ /*
+ * A timeout expired. See which reqs it applies to...
+ */
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
/* don't expire request waiting for context */
if (req->rq_wait_ctx)
*/
void ptlrpc_interrupted_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- cfs_list_t *tmp;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
- LASSERT(set != NULL);
- CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
+ LASSERT(set != NULL);
+ CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
- cfs_list_for_each(tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
- if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING)
- continue;
+ if (req->rq_phase != RQ_PHASE_RPC &&
+ req->rq_phase != RQ_PHASE_UNREGISTERING)
+ continue;
- ptlrpc_mark_interrupted(req);
- }
+ ptlrpc_mark_interrupted(req);
+ }
}
EXPORT_SYMBOL(ptlrpc_interrupted_set);
*/
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
- time_t now = cfs_time_current_sec();
- int timeout = 0;
- struct ptlrpc_request *req;
- int deadline;
- ENTRY;
+ struct list_head *tmp;
+ time_t now = cfs_time_current_sec();
+ int timeout = 0;
+ struct ptlrpc_request *req;
+ int deadline;
+ ENTRY;
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/*
* Request in-flight?
*/
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct ptlrpc_request *req;
struct l_wait_info lwi;
int rc, timeout;
if (set->set_producer)
(void)ptlrpc_set_producer(set);
else
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
(void)ptlrpc_send_new_req(req);
}
- if (cfs_list_empty(&set->set_requests))
+ if (list_empty(&set->set_requests))
RETURN(0);
do {
* I don't really care if we go once more round the loop in
* the error cases -eeb. */
if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_invalid_rqset = 1;
spin_unlock(&req->rq_lock);
LASSERT(atomic_read(&set->set_remaining) == 0);
rc = set->set_rc; /* rq_status of already freed requests if any */
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
if (req->rq_status != 0)
struct ptlrpc_set_cbdata *cbdata, *n;
int err;
-	cfs_list_for_each_entry_safe(cbdata, n,
-				     &set->set_cblist, psc_item) {
+	list_for_each_entry_safe(cbdata, n,
+				 &set->set_cblist, psc_item) {
- cfs_list_del_init(&cbdata->psc_item);
+ list_del_init(&cbdata->psc_item);
err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
if (err && !rc)
rc = err;
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
- LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request);
- LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
LASSERTF(!request->rq_replay, "req %p\n", request);
req_capsule_fini(&request->rq_pill);
if (request->rq_import != NULL) {
if (!locked)
spin_lock(&request->rq_import->imp_lock);
- cfs_list_del_init(&request->rq_replay_list);
+ list_del_init(&request->rq_replay_list);
if (!locked)
spin_unlock(&request->rq_import->imp_lock);
}
- LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
if (atomic_read(&request->rq_refcount) != 0) {
DEBUG_REQ(D_ERROR, request,
if (req->rq_commit_cb != NULL)
req->rq_commit_cb(req);
- cfs_list_del_init(&req->rq_replay_list);
+ list_del_init(&req->rq_replay_list);
__ptlrpc_req_finished(req, 1);
}
struct obd_import *imp = req->rq_import;
spin_lock(&imp->imp_lock);
- if (cfs_list_empty(&req->rq_replay_list)) {
+ if (list_empty(&req->rq_replay_list)) {
spin_unlock(&imp->imp_lock);
return;
}
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
-	cfs_list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
-				     rq_replay_list) {
+	list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
+				 rq_replay_list) {
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
if (req->rq_replay) {
DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
-			cfs_list_move_tail(&req->rq_replay_list,
-					   &imp->imp_committed_list);
+			list_move_tail(&req->rq_replay_list,
+				       &imp->imp_committed_list);
continue;
}
if (skip_committed_list)
GOTO(out, 0);
-	cfs_list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
-				     rq_replay_list) {
+	list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
+				 rq_replay_list) {
LASSERT(req->rq_transno != 0);
if (req->rq_import_generation < imp->imp_generation) {
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
assert_spin_locked(&imp->imp_lock);
as resent replayed requests. */
lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
- /* don't re-add requests that have been replayed */
- if (!cfs_list_empty(&req->rq_replay_list))
- return;
+ /* don't re-add requests that have been replayed */
+ if (!list_empty(&req->rq_replay_list))
+ return;
- lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
+ lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
- LASSERT(imp->imp_replayable);
- /* Balanced in ptlrpc_free_committed, usually. */
- ptlrpc_request_addref(req);
- cfs_list_for_each_prev(tmp, &imp->imp_replay_list) {
- struct ptlrpc_request *iter =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
+ LASSERT(imp->imp_replayable);
+ /* Balanced in ptlrpc_free_committed, usually. */
+ ptlrpc_request_addref(req);
+ list_for_each_prev(tmp, &imp->imp_replay_list) {
+ struct ptlrpc_request *iter = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_replay_list);
/* We may have duplicate transnos if we create and then
* open a file, or for closes retained if to match creating
continue;
}
- cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list);
- return;
- }
+ list_add(&req->rq_replay_list, &iter->rq_replay_list);
+ return;
+ }
- cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
+ list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
*/
void ptlrpc_abort_inflight(struct obd_import *imp)
{
- cfs_list_t *tmp, *n;
- ENTRY;
+ struct list_head *tmp, *n;
+ ENTRY;
- /* Make sure that no new requests get processed for this import.
- * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
- * this flag and then putting requests on sending_list or delayed_list.
- */
+ /* Make sure that no new requests get processed for this import.
+ * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
+ * this flag and then putting requests on sending_list or delayed_list.
+ */
spin_lock(&imp->imp_lock);
- /* XXX locking? Maybe we should remove each request with the list
- * locked? Also, how do we know if the requests on the list are
- * being freed at this time?
- */
- cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ /* XXX locking? Maybe we should remove each request with the list
+ * locked? Also, how do we know if the requests on the list are
+ * being freed at this time?
+ */
+ list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ struct ptlrpc_request *req = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_RPCTRACE, req, "inflight");
spin_unlock(&req->rq_lock);
}
- cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
*/
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
- LASSERT(set != NULL);
+ LASSERT(set != NULL);
- cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
spin_lock(&req->rq_lock);
if (req->rq_phase != RQ_PHASE_RPC) {
req->rq_pill.rc_fmt = (void *)&worker_format;
spin_lock_init(&req->rq_lock);
- CFS_INIT_LIST_HEAD(&req->rq_list);
- CFS_INIT_LIST_HEAD(&req->rq_replay_list);
- CFS_INIT_LIST_HEAD(&req->rq_set_chain);
- CFS_INIT_LIST_HEAD(&req->rq_history_list);
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_replay_list);
+ INIT_LIST_HEAD(&req->rq_set_chain);
+ INIT_LIST_HEAD(&req->rq_history_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
init_waitqueue_head(&req->rq_reply_waitq);
init_waitqueue_head(&req->rq_set_waitq);
atomic_set(&req->rq_refcount, 1);
conn->c_peer = peer;
conn->c_self = self;
- CFS_INIT_HLIST_NODE(&conn->c_hash);
+ INIT_HLIST_NODE(&conn->c_hash);
atomic_set(&conn->c_refcount, 1);
if (uuid)
obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
static unsigned
conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
{
- return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
+ return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
}
static int
-conn_keycmp(const void *key, cfs_hlist_node_t *hnode)
+conn_keycmp(const void *key, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
- const lnet_process_id_t *conn_key;
+ struct ptlrpc_connection *conn;
+ const lnet_process_id_t *conn_key;
- LASSERT(key != NULL);
- conn_key = (lnet_process_id_t*)key;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ LASSERT(key != NULL);
+ conn_key = (lnet_process_id_t *)key;
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- return conn_key->nid == conn->c_peer.nid &&
+ return conn_key->nid == conn->c_peer.nid &&
conn_key->pid == conn->c_peer.pid;
}
static void *
-conn_key(cfs_hlist_node_t *hnode)
+conn_key(struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- return &conn->c_peer;
+ struct ptlrpc_connection *conn;
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ return &conn->c_peer;
}
static void *
-conn_object(cfs_hlist_node_t *hnode)
+conn_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ return hlist_entry(hnode, struct ptlrpc_connection, c_hash);
}
static void
-conn_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
+ struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
atomic_inc(&conn->c_refcount);
}
static void
-conn_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct ptlrpc_connection *conn;
+ struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
atomic_dec(&conn->c_refcount);
}
static void
-conn_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ptlrpc_connection *conn;
- conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
/*
* Nothing should be left. Connection user put it and
* connection also was deleted from table by this time
}
static cfs_hash_ops_t conn_hash_ops = {
- .hs_hash = conn_hashfn,
- .hs_keycmp = conn_keycmp,
- .hs_key = conn_key,
- .hs_object = conn_object,
- .hs_get = conn_get,
- .hs_put_locked = conn_put_locked,
- .hs_exit = conn_exit,
+ .hs_hash = conn_hashfn,
+ .hs_keycmp = conn_keycmp,
+ .hs_key = conn_key,
+ .hs_object = conn_object,
+ .hs_get = conn_get,
+ .hs_put_locked = conn_put_locked,
+ .hs_exit = conn_exit,
};
req->rq_history_seq = new_seq;
- cfs_list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
+ list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
/*
req->rq_rqbd = rqbd;
req->rq_phase = RQ_PHASE_NEW;
spin_lock_init(&req->rq_lock);
- CFS_INIT_LIST_HEAD(&req->rq_timed_list);
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ INIT_LIST_HEAD(&req->rq_timed_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
atomic_set(&req->rq_refcount, 1);
if (ev->type == LNET_EVENT_PUT)
CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
rqbd->rqbd_refcount++;
}
- cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
+ list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
svcpt->scp_nreqs_incoming++;
/* NB everything can disappear under us once the request
}
#ifndef __KERNEL__
-CFS_LIST_HEAD(liblustre_wait_callbacks);
-CFS_LIST_HEAD(liblustre_idle_callbacks);
+struct list_head liblustre_wait_callbacks;
+struct list_head liblustre_idle_callbacks;
void *liblustre_services_callback;
void *
-liblustre_register_waitidle_callback (cfs_list_t *callback_list,
- const char *name,
- int (*fn)(void *arg), void *arg)
+liblustre_register_waitidle_callback(struct list_head *callback_list,
+ const char *name,
+ int (*fn)(void *arg), void *arg)
{
struct liblustre_wait_callback *llwc;
llwc->llwc_name = name;
llwc->llwc_fn = fn;
llwc->llwc_arg = arg;
- cfs_list_add_tail(&llwc->llwc_list, callback_list);
+ list_add_tail(&llwc->llwc_list, callback_list);
return (llwc);
}
void
liblustre_deregister_waitidle_callback (void *opaque)
{
- struct liblustre_wait_callback *llwc = opaque;
+ struct liblustre_wait_callback *llwc = opaque;
- cfs_list_del(&llwc->llwc_list);
- OBD_FREE(llwc, sizeof(*llwc));
+ list_del(&llwc->llwc_list);
+ OBD_FREE(llwc, sizeof(*llwc));
}
void *
int
liblustre_wait_event (int timeout)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct liblustre_wait_callback *llwc;
int found_something = 0;
found_something = 1;
/* Give all registered callbacks a bite at the cherry */
- cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
- llwc = cfs_list_entry(tmp,
+ list_for_each(tmp, &liblustre_wait_callbacks) {
+ llwc = list_entry(tmp,
struct liblustre_wait_callback,
llwc_list);
liblustre_wait_idle(void)
{
static int recursed = 0;
-
- cfs_list_t *tmp;
- struct liblustre_wait_callback *llwc;
- int idle = 0;
+ struct list_head *tmp;
+ struct liblustre_wait_callback *llwc;
+ int idle = 0;
LASSERT(!recursed);
recursed = 1;
idle = 1;
- cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
- llwc = cfs_list_entry(tmp,
- struct liblustre_wait_callback,
- llwc_list);
-
+ list_for_each(tmp, &liblustre_idle_callbacks) {
+ llwc = list_entry(tmp, struct liblustre_wait_callback,
+ llwc_list);
if (!llwc->llwc_fn(llwc->llwc_arg)) {
idle = 0;
break;
return -EIO;
}
#ifndef __KERNEL__
+ INIT_LIST_HEAD(&liblustre_wait_callbacks);
+ INIT_LIST_HEAD(&liblustre_idle_callbacks);
+
liblustre_services_callback =
liblustre_register_wait_callback("liblustre_check_services",
&liblustre_check_services,
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
time_t now = cfs_time_current_sec();
- cfs_list_t *tmp, *n;
+ struct list_head *tmp, *n;
struct ptlrpc_request *req;
unsigned int timeout = 0;
spin_lock(&imp->imp_lock);
- cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_list);
timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
}
spin_unlock(&imp->imp_lock);
*/
void ptlrpc_invalidate_import(struct obd_import *imp)
{
- cfs_list_t *tmp, *n;
+ struct list_head *tmp, *n;
struct ptlrpc_request *req;
struct l_wait_info lwi;
unsigned int timeout;
* dropped to zero. No new inflights possible at
* this point. */
rc = 0;
- } else {
- cfs_list_for_each_safe(tmp, n,
- &imp->imp_sending_list) {
- req = cfs_list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
- DEBUG_REQ(D_ERROR, req,
- "still on sending list");
- }
- cfs_list_for_each_safe(tmp, n,
- &imp->imp_delayed_list) {
- req = cfs_list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
- DEBUG_REQ(D_ERROR, req,
- "still on delayed list");
- }
+ } else {
+ list_for_each_safe(tmp, n,
+ &imp->imp_sending_list) {
+ req = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
+ DEBUG_REQ(D_ERROR, req,
+ "still on sending list");
+ }
+ list_for_each_safe(tmp, n,
+ &imp->imp_delayed_list) {
+ req = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
+ DEBUG_REQ(D_ERROR, req,
+ "still on delayed list");
+ }
CERROR("%s: RPCs in \"%s\" phase found (%d). "
"Network is sluggish? Waiting them "
spin_lock(&imp->imp_lock);
- if (cfs_list_empty(&imp->imp_conn_list)) {
+ if (list_empty(&imp->imp_conn_list)) {
CERROR("%s: no connections available\n",
imp->imp_obd->obd_name);
spin_unlock(&imp->imp_lock);
RETURN(-EINVAL);
}
- cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
imp->imp_obd->obd_name,
libcfs_nid2str(conn->oic_conn->c_peer.nid),
static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
{
struct ptlrpc_request *req;
- cfs_list_t *tmp;
+ struct list_head *tmp;
/* The requests in committed_list always have smaller transnos than
* the requests in replay_list */
- if (!cfs_list_empty(&imp->imp_committed_list)) {
+ if (!list_empty(&imp->imp_committed_list)) {
tmp = imp->imp_committed_list.next;
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
*transno = req->rq_transno;
if (req->rq_transno == 0) {
DEBUG_REQ(D_ERROR, req, "zero transno in committed_list");
}
return 1;
}
- if (!cfs_list_empty(&imp->imp_replay_list)) {
+ if (!list_empty(&imp->imp_replay_list)) {
tmp = imp->imp_replay_list.next;
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
*transno = req->rq_transno;
if (req->rq_transno == 0) {
DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
static bool warned;
spin_lock(&imp->imp_lock);
- cfs_list_del(&imp->imp_conn_current->oic_item);
- cfs_list_add(&imp->imp_conn_current->oic_item,
+ list_del(&imp->imp_conn_current->oic_item);
+ list_add(&imp->imp_conn_current->oic_item,
&imp->imp_conn_list);
imp->imp_last_success_conn =
imp->imp_conn_current->oic_last_attempt;
pol_idx = 0;
- cfs_list_for_each_entry(policy, &nrs->nrs_policy_list,
- pol_list) {
+ list_for_each_entry(policy, &nrs->nrs_policy_list,
+ pol_list) {
LASSERT(pol_idx < num_pols);
nrs_policy_get_info_locked(policy, &tmp);
struct ptlrpc_srh_iterator *srhi,
__u64 seq)
{
- cfs_list_t *e;
+ struct list_head *e;
struct ptlrpc_request *req;
if (srhi->srhi_req != NULL &&
"%s:%d: seek seq "LPU64", request seq "LPU64"\n",
svcpt->scp_service->srv_name, svcpt->scp_cpt,
srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
- LASSERTF(!cfs_list_empty(&svcpt->scp_hist_reqs),
+ LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
"%s:%d: seek offset "LPU64", request seq "LPU64", "
"last culled "LPU64"\n",
svcpt->scp_service->srv_name, svcpt->scp_cpt,
}
while (e != &svcpt->scp_hist_reqs) {
- req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list);
+ req = list_entry(e, struct ptlrpc_request, rq_history_list);
if (req->rq_history_seq >= seq) {
srhi->srhi_seq = req->rq_history_seq;
#include <libcfs/libcfs.h>
#include "ptlrpc_internal.h"
-extern struct list_head ptlrpc_all_services;
-
/**
* NRS core object.
*/
spin_lock(&nrs->nrs_lock);
}
- LASSERT(cfs_list_empty(&policy->pol_list_queued));
+ LASSERT(list_empty(&policy->pol_list_queued));
LASSERT(policy->pol_req_queued == 0 &&
policy->pol_req_started == 0);
{
struct ptlrpc_nrs_policy *tmp;
- cfs_list_for_each_entry(tmp, &nrs->nrs_policy_list, pol_list) {
+ list_for_each_entry(tmp, &nrs->nrs_policy_list, pol_list) {
if (strncmp(tmp->pol_desc->pd_name, name,
NRS_POL_NAME_MAX) == 0) {
nrs_policy_get_locked(tmp);
LASSERT(policy->pol_state == NRS_POL_STATE_STOPPED);
}
- cfs_list_del(&policy->pol_list);
+ list_del(&policy->pol_list);
nrs->nrs_num_pols--;
nrs_policy_put_locked(policy);
policy->pol_state = NRS_POL_STATE_STOPPED;
policy->pol_flags = desc->pd_flags;
- CFS_INIT_LIST_HEAD(&policy->pol_list);
- CFS_INIT_LIST_HEAD(&policy->pol_list_queued);
+ INIT_LIST_HEAD(&policy->pol_list);
+ INIT_LIST_HEAD(&policy->pol_list_queued);
rc = nrs_policy_init(policy);
if (rc != 0) {
RETURN(-EEXIST);
}
- cfs_list_add_tail(&policy->pol_list, &nrs->nrs_policy_list);
+ list_add_tail(&policy->pol_list, &nrs->nrs_policy_list);
nrs->nrs_num_pols++;
if (policy->pol_flags & PTLRPC_NRS_FL_REG_START)
* Add the policy to the NRS head's list of policies with enqueued
* requests, if it has not been added there.
*/
- if (unlikely(cfs_list_empty(&policy->pol_list_queued)))
- cfs_list_add_tail(&policy->pol_list_queued,
+ if (unlikely(list_empty(&policy->pol_list_queued)))
+ list_add_tail(&policy->pol_list_queued,
&policy->pol_nrs->nrs_policy_queued);
}
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
- cfs_list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
+ list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
if (nrs_policy_compatible(svc, desc)) {
rc = nrs_policy_register(nrs, desc);
if (rc != 0) {
nrs->nrs_svcpt = svcpt;
nrs->nrs_queue_type = queue;
spin_lock_init(&nrs->nrs_lock);
- CFS_INIT_LIST_HEAD(&nrs->nrs_policy_list);
- CFS_INIT_LIST_HEAD(&nrs->nrs_policy_queued);
+ INIT_LIST_HEAD(&nrs->nrs_policy_list);
+ INIT_LIST_HEAD(&nrs->nrs_policy_queued);
nrs->nrs_throttling = 0;
rc = nrs_register_policies_locked(nrs);
}
nrs->nrs_stopping = 1;
- cfs_list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list,
+ list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list,
pol_list) {
rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
LASSERT(rc == 0);
struct ptlrpc_nrs_pol_desc *tmp;
ENTRY;
- cfs_list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
+ list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0)
RETURN(tmp);
}
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
- cfs_list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
+ list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
if (!nrs_policy_compatible(svc, desc) ||
unlikely(svc->srv_is_stopping))
*/
mutex_lock(&ptlrpc_all_services_mutex);
- cfs_list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
+ list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
struct ptlrpc_service_part *svcpt;
int i;
int rc2;
mutex_unlock(&ptlrpc_all_services_mutex);
internal:
- cfs_list_add_tail(&desc->pd_list, &nrs_core.nrs_policies);
+ list_add_tail(&desc->pd_list, &nrs_core.nrs_policies);
fail:
mutex_unlock(&nrs_core.nrs_mutex);
CDEBUG(D_INFO, "Unregistering policy %s from NRS core.\n",
conf->nc_name);
- cfs_list_del(&desc->pd_list);
+ list_del(&desc->pd_list);
OBD_FREE_PTR(desc);
fail:
* Set up lprocfs interfaces for all supported policies for the
* service.
*/
- cfs_list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
+ list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
if (!nrs_policy_compatible(svc, desc))
continue;
* Clean up lprocfs interfaces for all supported policies for the
* service.
*/
- cfs_list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
+ list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
if (!nrs_policy_compatible(svc, desc))
continue;
* ptlrpc_nrs::nrs_policy_queued.
*/
if (unlikely(policy->pol_req_queued == 0)) {
- cfs_list_del_init(&policy->pol_list_queued);
+ list_del_init(&policy->pol_list_queued);
/**
* If there are other policies with queued requests, move the
LASSERT(policy->pol_req_queued <
policy->pol_nrs->nrs_req_queued);
- cfs_list_move_tail(&policy->pol_list_queued,
+ list_move_tail(&policy->pol_list_queued,
&policy->pol_nrs->nrs_policy_queued);
}
}
* Always try to drain requests from all NRS polices even if they are
* inactive, because the user can change policy status at runtime.
*/
- cfs_list_for_each_entry(policy, &nrs->nrs_policy_queued,
+ list_for_each_entry(policy, &nrs->nrs_policy_queued,
pol_list_queued) {
nrq = nrs_request_get(policy, peek, force);
if (nrq != NULL) {
ENTRY;
mutex_init(&nrs_core.nrs_mutex);
- CFS_INIT_LIST_HEAD(&nrs_core.nrs_policies);
+ INIT_LIST_HEAD(&nrs_core.nrs_policies);
rc = ptlrpc_nrs_policy_register(&nrs_conf_fifo);
if (rc != 0)
struct ptlrpc_nrs_pol_desc *desc;
struct ptlrpc_nrs_pol_desc *tmp;
- cfs_list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies,
+ list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies,
pd_list) {
- cfs_list_del_init(&desc->pd_list);
+ list_del_init(&desc->pd_list);
OBD_FREE_PTR(desc);
}
}
return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
}
-static int nrs_crrn_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_crrn_hop_keycmp(const void *key, struct hlist_node *hnode)
{
lnet_nid_t *nid = (lnet_nid_t *)key;
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
return *nid == cli->cc_nid;
}
-static void *nrs_crrn_hop_key(cfs_hlist_node_t *hnode)
+static void *nrs_crrn_hop_key(struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
return &cli->cc_nid;
}
-static void *nrs_crrn_hop_object(cfs_hlist_node_t *hnode)
+static void *nrs_crrn_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nrs_crrn_client, cc_hnode);
+ return hlist_entry(hnode, struct nrs_crrn_client, cc_hnode);
}
-static void nrs_crrn_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_crrn_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
atomic_inc(&cli->cc_ref);
}
-static void nrs_crrn_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_crrn_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
atomic_dec(&cli->cc_ref);
}
-static void nrs_crrn_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_crrn_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
LASSERTF(atomic_read(&cli->cc_ref) == 0,
if (head == NULL)
return -ENOMEM;
- CFS_INIT_LIST_HEAD(&head->fh_list);
+ INIT_LIST_HEAD(&head->fh_list);
policy->pol_private = head;
return 0;
}
struct nrs_fifo_head *head = policy->pol_private;
LASSERT(head != NULL);
- LASSERT(cfs_list_empty(&head->fh_list));
+ LASSERT(list_empty(&head->fh_list));
OBD_FREE_PTR(head);
}
struct nrs_fifo_head *head = policy->pol_private;
struct ptlrpc_nrs_request *nrq;
- nrq = unlikely(cfs_list_empty(&head->fh_list)) ? NULL :
- cfs_list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
+ nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
+ list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
nr_u.fifo.fr_list);
if (likely(!peek && nrq != NULL)) {
struct ptlrpc_request,
rq_nrq);
- cfs_list_del_init(&nrq->nr_u.fifo.fr_list);
+ list_del_init(&nrq->nr_u.fifo.fr_list);
CDEBUG(D_RPCTRACE, "NRS start %s request from %s, seq: "LPU64
"\n", policy->pol_desc->pd_name,
* Only used for debugging
*/
nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
- cfs_list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
+ list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
return 0;
}
static void nrs_fifo_req_del(struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_request *nrq)
{
- LASSERT(!cfs_list_empty(&nrq->nr_u.fifo.fr_list));
- cfs_list_del_init(&nrq->nr_u.fifo.fr_list);
+ LASSERT(!list_empty(&nrq->nr_u.fifo.fr_list));
+ list_del_init(&nrq->nr_u.fifo.fr_list);
}
/**
return cfs_hash_djb2_hash(key, sizeof(struct nrs_orr_key), mask);
}
-static void *nrs_orr_hop_key(cfs_hlist_node_t *hnode)
+static void *nrs_orr_hop_key(struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
return &orro->oo_key;
}
-static int nrs_orr_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_orr_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
&((struct nrs_orr_key *)key)->ok_fid);
}
-static void *nrs_orr_hop_object(cfs_hlist_node_t *hnode)
+static void *nrs_orr_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nrs_orr_object, oo_hnode);
+ return hlist_entry(hnode, struct nrs_orr_object, oo_hnode);
}
-static void nrs_orr_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_orr_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
orro->oo_ref++;
* Removes an nrs_orr_object the hash and frees its memory, if the object has
* no active users.
*/
-static void nrs_orr_hop_put_free(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_orr_hop_put_free(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
struct nrs_orr_data *orrd = container_of(orro->oo_res.res_parent,
OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
}
-static void nrs_orr_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_orr_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
orro->oo_ref--;
}
-static int nrs_trr_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_trr_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
return orro->oo_key.ok_idx == ((struct nrs_orr_key *)key)->ok_idx;
}
-static void nrs_trr_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_trr_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
struct nrs_orr_data *orrd = container_of(orro->oo_res.res_parent,
static void
nrs_tbf_cli_rule_put(struct nrs_tbf_client *cli)
{
- LASSERT(!cfs_list_empty(&cli->tc_linkage));
+ LASSERT(!list_empty(&cli->tc_linkage));
LASSERT(cli->tc_rule);
- cfs_list_del_init(&cli->tc_linkage);
+ list_del_init(&cli->tc_linkage);
nrs_tbf_rule_put(cli->tc_rule);
cli->tc_rule = NULL;
}
struct nrs_tbf_rule *rule;
LASSERT(head != NULL);
- cfs_list_for_each_entry(rule, &head->th_list, tr_linkage) {
+ list_for_each_entry(rule, &head->th_list, tr_linkage) {
LASSERT((rule->tr_flags & NTRS_STOPPING) == 0);
if (strcmp(rule->tr_name, name) == 0) {
nrs_tbf_rule_get(rule);
spin_lock(&head->th_rule_lock);
/* Match the newest rule in the list */
- cfs_list_for_each_entry(tmp_rule, &head->th_list, tr_linkage) {
+ list_for_each_entry(tmp_rule, &head->th_list, tr_linkage) {
LASSERT((tmp_rule->tr_flags & NTRS_STOPPING) == 0);
if (head->th_ops->o_rule_match(tmp_rule, cli)) {
rule = tmp_rule;
cli->tc_in_heap = false;
head->th_ops->o_cli_init(cli, req);
- CFS_INIT_LIST_HEAD(&cli->tc_list);
- CFS_INIT_LIST_HEAD(&cli->tc_linkage);
+ INIT_LIST_HEAD(&cli->tc_list);
+ INIT_LIST_HEAD(&cli->tc_linkage);
atomic_set(&cli->tc_ref, 1);
rule = nrs_tbf_rule_match(head, cli);
nrs_tbf_cli_reset(head, rule, cli);
static void
nrs_tbf_cli_fini(struct nrs_tbf_client *cli)
{
- LASSERT(cfs_list_empty(&cli->tc_list));
+ LASSERT(list_empty(&cli->tc_list));
LASSERT(!cli->tc_in_heap);
LASSERT(atomic_read(&cli->tc_ref) == 0);
nrs_tbf_cli_rule_put(cli);
rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
rule->tr_depth = tbf_depth;
atomic_set(&rule->tr_ref, 1);
- CFS_INIT_LIST_HEAD(&rule->tr_cli_list);
- CFS_INIT_LIST_HEAD(&rule->tr_nids);
+ INIT_LIST_HEAD(&rule->tr_cli_list);
+ INIT_LIST_HEAD(&rule->tr_nids);
rc = head->th_ops->o_rule_init(policy, rule, start);
if (rc) {
nrs_tbf_rule_put(rule);
return -EEXIST;
}
- cfs_list_add(&rule->tr_linkage, &head->th_list);
+ list_add(&rule->tr_linkage, &head->th_list);
rule->tr_head = head;
spin_unlock(&head->th_rule_lock);
atomic_inc(&head->th_rule_sequence);
if (rule == NULL)
return -ENOENT;
- cfs_list_del_init(&rule->tr_linkage);
+ list_del_init(&rule->tr_linkage);
rule->tr_flags |= NTRS_STOPPING;
nrs_tbf_rule_put(rule);
nrs_tbf_rule_put(rule);
return cfs_hash_djb2_hash(key, strlen(key), mask);
}
-static int nrs_tbf_jobid_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_tbf_jobid_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
return (strcmp(cli->tc_jobid, key) == 0);
}
-static void *nrs_tbf_jobid_hop_key(cfs_hlist_node_t *hnode)
+static void *nrs_tbf_jobid_hop_key(struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
return cli->tc_jobid;
}
-static void *nrs_tbf_jobid_hop_object(cfs_hlist_node_t *hnode)
+static void *nrs_tbf_jobid_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
+ return hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
}
-static void nrs_tbf_jobid_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_tbf_jobid_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
atomic_inc(&cli->tc_ref);
}
-static void nrs_tbf_jobid_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_tbf_jobid_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
atomic_dec(&cli->tc_ref);
}
-static void nrs_tbf_jobid_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_tbf_jobid_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
cfs_hash_bd_t *bd,
const char *jobid)
{
- cfs_hlist_node_t *hnode;
+ struct hlist_node *hnode;
struct nrs_tbf_client *cli;
/* cfs_hash_bd_peek_locked is a somehow "internal" function
cfs_hash_get(hs, hnode);
cli = container_of0(hnode, struct nrs_tbf_client, tc_hnode);
- if (!cfs_list_empty(&cli->tc_lru))
- cfs_list_del_init(&cli->tc_lru);
+ if (!list_empty(&cli->tc_lru))
+ list_del_init(&cli->tc_lru);
return cli;
}
cfs_hash_t *hs = head->th_cli_hash;
struct nrs_tbf_bucket *bkt;
int hw;
- CFS_LIST_HEAD (zombies);
+ LIST_HEAD(zombies);
cfs_hash_bd_get(hs, &cli->tc_jobid, &bd);
bkt = cfs_hash_bd_extra_get(hs, &bd);
if (!cfs_hash_bd_dec_and_lock(hs, &bd, &cli->tc_ref))
return;
- LASSERT(cfs_list_empty(&cli->tc_lru));
- cfs_list_add_tail(&cli->tc_lru, &bkt->ntb_lru);
+ LASSERT(list_empty(&cli->tc_lru));
+ list_add_tail(&cli->tc_lru, &bkt->ntb_lru);
/*
* Check and purge the LRU, there is at least one client in the LRU.
hw = tbf_jobid_cache_size >>
(hs->hs_cur_bits - hs->hs_bkt_bits);
while (cfs_hash_bd_count_get(&bd) > hw) {
- if (unlikely(cfs_list_empty(&bkt->ntb_lru)))
+ if (unlikely(list_empty(&bkt->ntb_lru)))
break;
- cli = cfs_list_entry(bkt->ntb_lru.next,
+ cli = list_entry(bkt->ntb_lru.next,
struct nrs_tbf_client,
tc_lru);
LASSERT(atomic_read(&cli->tc_ref) == 0);
cfs_hash_bd_del_locked(hs, &bd, &cli->tc_hnode);
- cfs_list_move(&cli->tc_lru, &zombies);
+ list_move(&cli->tc_lru, &zombies);
}
cfs_hash_bd_unlock(head->th_cli_hash, &bd, 1);
- while (!cfs_list_empty(&zombies)) {
+ while (!list_empty(&zombies)) {
cli = container_of0(zombies.next,
struct nrs_tbf_client, tc_lru);
- cfs_list_del_init(&cli->tc_lru);
+ list_del_init(&cli->tc_lru);
nrs_tbf_cli_fini(cli);
}
}
if (jobid == NULL)
jobid = NRS_TBF_JOBID_NULL;
LASSERT(strlen(jobid) < JOBSTATS_JOBID_SIZE);
- CFS_INIT_LIST_HEAD(&cli->tc_lru);
+ INIT_LIST_HEAD(&cli->tc_lru);
memcpy(cli->tc_jobid, jobid, strlen(jobid));
}
cfs_hash_for_each_bucket(head->th_cli_hash, &bd, i) {
bkt = cfs_hash_bd_extra_get(head->th_cli_hash, &bd);
- CFS_INIT_LIST_HEAD(&bkt->ntb_lru);
+ INIT_LIST_HEAD(&bkt->ntb_lru);
}
memset(&start, 0, sizeof(start));
start.tc_rpc_rate = tbf_rate;
start.tc_rule_flags = NTRS_DEFAULT;
start.tc_name = NRS_TBF_DEFAULT_RULE;
- CFS_INIT_LIST_HEAD(&start.tc_jobids);
+ INIT_LIST_HEAD(&start.tc_jobids);
rc = nrs_tbf_rule_start(policy, head, &start);
return rc;
*
*/
static void
-nrs_tbf_jobid_list_free(cfs_list_t *jobid_list)
+nrs_tbf_jobid_list_free(struct list_head *jobid_list)
{
struct nrs_tbf_jobid *jobid, *n;
- cfs_list_for_each_entry_safe(jobid, n, jobid_list, tj_linkage) {
+ list_for_each_entry_safe(jobid, n, jobid_list, tj_linkage) {
OBD_FREE(jobid->tj_id, strlen(jobid->tj_id) + 1);
- cfs_list_del(&jobid->tj_linkage);
+ list_del(&jobid->tj_linkage);
OBD_FREE(jobid, sizeof(struct nrs_tbf_jobid));
}
}
static int
-nrs_tbf_jobid_list_add(const struct cfs_lstr *id, cfs_list_t *jobid_list)
+nrs_tbf_jobid_list_add(const struct cfs_lstr *id, struct list_head *jobid_list)
{
struct nrs_tbf_jobid *jobid;
}
memcpy(jobid->tj_id, id->ls_str, id->ls_len);
- cfs_list_add_tail(&jobid->tj_linkage, jobid_list);
+ list_add_tail(&jobid->tj_linkage, jobid_list);
return 0;
}
static int
-nrs_tbf_jobid_list_match(cfs_list_t *jobid_list, char *id)
+nrs_tbf_jobid_list_match(struct list_head *jobid_list, char *id)
{
struct nrs_tbf_jobid *jobid;
- cfs_list_for_each_entry(jobid, jobid_list, tj_linkage) {
+ list_for_each_entry(jobid, jobid_list, tj_linkage) {
if (strcmp(id, jobid->tj_id) == 0)
return 1;
}
}
static int
-nrs_tbf_jobid_list_parse(char *str, int len, cfs_list_t *jobid_list)
+nrs_tbf_jobid_list_parse(char *str, int len, struct list_head *jobid_list)
{
struct cfs_lstr src;
struct cfs_lstr res;
src.ls_str = str;
src.ls_len = len;
- CFS_INIT_LIST_HEAD(jobid_list);
+ INIT_LIST_HEAD(jobid_list);
while (src.ls_str) {
rc = cfs_gettok(&src, ' ', &res);
if (rc == 0) {
static void nrs_tbf_jobid_cmd_fini(struct nrs_tbf_cmd *cmd)
{
- if (!cfs_list_empty(&cmd->tc_jobids))
+ if (!list_empty(&cmd->tc_jobids))
nrs_tbf_jobid_list_free(&cmd->tc_jobids);
if (cmd->tc_jobids_str)
OBD_FREE(cmd->tc_jobids_str, strlen(cmd->tc_jobids_str) + 1);
start->tc_jobids_str,
strlen(start->tc_jobids_str));
- CFS_INIT_LIST_HEAD(&rule->tr_jobids);
- if (!cfs_list_empty(&start->tc_jobids)) {
+ INIT_LIST_HEAD(&rule->tr_jobids);
+ if (!list_empty(&start->tc_jobids)) {
rc = nrs_tbf_jobid_list_parse(rule->tr_jobids_str,
strlen(rule->tr_jobids_str),
&rule->tr_jobids);
static void nrs_tbf_jobid_rule_fini(struct nrs_tbf_rule *rule)
{
- if (!cfs_list_empty(&rule->tr_jobids))
+ if (!list_empty(&rule->tr_jobids))
nrs_tbf_jobid_list_free(&rule->tr_jobids);
LASSERT(rule->tr_jobids_str != NULL);
OBD_FREE(rule->tr_jobids_str, strlen(rule->tr_jobids_str) + 1);
return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
}
-static int nrs_tbf_nid_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_tbf_nid_hop_keycmp(const void *key, struct hlist_node *hnode)
{
lnet_nid_t *nid = (lnet_nid_t *)key;
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
return *nid == cli->tc_nid;
}
-static void *nrs_tbf_nid_hop_key(cfs_hlist_node_t *hnode)
+static void *nrs_tbf_nid_hop_key(struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
return &cli->tc_nid;
}
-static void *nrs_tbf_nid_hop_object(cfs_hlist_node_t *hnode)
+static void *nrs_tbf_nid_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
+ return hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
}
-static void nrs_tbf_nid_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_tbf_nid_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
atomic_inc(&cli->tc_ref);
}
-static void nrs_tbf_nid_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_tbf_nid_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
atomic_dec(&cli->tc_ref);
}
-static void nrs_tbf_nid_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_tbf_nid_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_tbf_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
struct nrs_tbf_client,
tc_hnode);
start.tc_rpc_rate = tbf_rate;
start.tc_rule_flags = NTRS_DEFAULT;
start.tc_name = NRS_TBF_DEFAULT_RULE;
- CFS_INIT_LIST_HEAD(&start.tc_nids);
+ INIT_LIST_HEAD(&start.tc_nids);
rc = nrs_tbf_rule_start(policy, head, &start);
return rc;
start->tc_nids_str,
strlen(start->tc_nids_str));
- CFS_INIT_LIST_HEAD(&rule->tr_nids);
- if (!cfs_list_empty(&start->tc_nids)) {
+ INIT_LIST_HEAD(&rule->tr_nids);
+ if (!list_empty(&start->tc_nids)) {
if (cfs_parse_nidlist(rule->tr_nids_str,
strlen(rule->tr_nids_str),
&rule->tr_nids) <= 0) {
static void nrs_tbf_nid_rule_fini(struct nrs_tbf_rule *rule)
{
- if (!cfs_list_empty(&rule->tr_nids))
+ if (!list_empty(&rule->tr_nids))
cfs_free_nidlist(&rule->tr_nids);
LASSERT(rule->tr_nids_str != NULL);
OBD_FREE(rule->tr_nids_str, strlen(rule->tr_nids_str) + 1);
static void nrs_tbf_nid_cmd_fini(struct nrs_tbf_cmd *cmd)
{
- if (!cfs_list_empty(&cmd->tc_nids))
+ if (!list_empty(&cmd->tc_nids))
cfs_free_nidlist(&cmd->tc_nids);
if (cmd->tc_nids_str)
OBD_FREE(cmd->tc_nids_str, strlen(cmd->tc_nids_str) + 1);
atomic_set(&head->th_rule_sequence, 0);
spin_lock_init(&head->th_rule_lock);
- CFS_INIT_LIST_HEAD(&head->th_list);
+ INIT_LIST_HEAD(&head->th_list);
hrtimer_init(&head->th_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
head->th_timer.function = nrs_tbf_timer_cb;
rc = head->th_ops->o_startup(policy, head);
hrtimer_cancel(&head->th_timer);
/* Should cleanup hash first before free rules */
cfs_hash_putref(head->th_cli_hash);
- cfs_list_for_each_entry_safe(rule, n, &head->th_list, tr_linkage) {
- cfs_list_del_init(&rule->tr_linkage);
+ list_for_each_entry_safe(rule, n, &head->th_list, tr_linkage) {
+ list_del_init(&rule->tr_linkage);
nrs_tbf_rule_put(rule);
}
- LASSERT(cfs_list_empty(&head->th_list));
+ LASSERT(list_empty(&head->th_list));
LASSERT(head->th_binheap != NULL);
LASSERT(cfs_binheap_is_empty(head->th_binheap));
cfs_binheap_destroy(head->th_binheap);
cli = container_of(node, struct nrs_tbf_client, tc_node);
LASSERT(cli->tc_in_heap);
if (peek) {
- nrq = cfs_list_entry(cli->tc_list.next,
+ nrq = list_entry(cli->tc_list.next,
struct ptlrpc_nrs_request,
nr_u.tbf.tr_list);
} else {
ntoken = cli->tc_depth;
if (ntoken > 0) {
struct ptlrpc_request *req;
- nrq = cfs_list_entry(cli->tc_list.next,
+ nrq = list_entry(cli->tc_list.next,
struct ptlrpc_nrs_request,
nr_u.tbf.tr_list);
req = container_of(nrq,
ntoken--;
cli->tc_ntoken = ntoken;
cli->tc_check_time = now;
- cfs_list_del_init(&nrq->nr_u.tbf.tr_list);
- if (cfs_list_empty(&cli->tc_list)) {
+ list_del_init(&nrq->nr_u.tbf.tr_list);
+ if (list_empty(&cli->tc_list)) {
cfs_binheap_remove(head->th_binheap,
&cli->tc_node);
cli->tc_in_heap = false;
struct nrs_tbf_client, tc_res);
head = container_of(nrs_request_resource(nrq)->res_parent,
struct nrs_tbf_head, th_res);
- if (cfs_list_empty(&cli->tc_list)) {
+ if (list_empty(&cli->tc_list)) {
LASSERT(!cli->tc_in_heap);
rc = cfs_binheap_insert(head->th_binheap, &cli->tc_node);
if (rc == 0) {
cli->tc_in_heap = true;
nrq->nr_u.tbf.tr_sequence = head->th_sequence++;
- cfs_list_add_tail(&nrq->nr_u.tbf.tr_list,
+ list_add_tail(&nrq->nr_u.tbf.tr_list,
&cli->tc_list);
if (policy->pol_nrs->nrs_throttling) {
__u64 deadline = cli->tc_check_time +
} else {
LASSERT(cli->tc_in_heap);
nrq->nr_u.tbf.tr_sequence = head->th_sequence++;
- cfs_list_add_tail(&nrq->nr_u.tbf.tr_list,
+ list_add_tail(&nrq->nr_u.tbf.tr_list,
&cli->tc_list);
}
return rc;
head = container_of(nrs_request_resource(nrq)->res_parent,
struct nrs_tbf_head, th_res);
- LASSERT(!cfs_list_empty(&nrq->nr_u.tbf.tr_list));
- cfs_list_del_init(&nrq->nr_u.tbf.tr_list);
- if (cfs_list_empty(&cli->tc_list)) {
+ LASSERT(!list_empty(&nrq->nr_u.tbf.tr_list));
+ list_del_init(&nrq->nr_u.tbf.tr_list);
+ if (list_empty(&cli->tc_list)) {
cfs_binheap_remove(head->th_binheap,
&cli->tc_node);
cli->tc_in_heap = false;
EXPORT_SYMBOL(lustre_pack_request);
#if RS_DEBUG
-CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
+LIST_HEAD(ptlrpc_rs_debug_lru);
spinlock_t ptlrpc_rs_debug_lock;
#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
do { \
spin_lock(&ptlrpc_rs_debug_lock); \
- cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
+ list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
do { \
spin_lock(&ptlrpc_rs_debug_lock); \
- cfs_list_del(&(rs)->rs_debug_list); \
+ list_del(&(rs)->rs_debug_list); \
spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#else
spin_lock(&svcpt->scp_rep_lock);
/* See if we have anything in a pool, and wait if nothing */
- while (cfs_list_empty(&svcpt->scp_rep_idle)) {
+ while (list_empty(&svcpt->scp_rep_idle)) {
struct l_wait_info lwi;
int rc;
* bail out instead of waiting infinitely */
lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
rc = l_wait_event(svcpt->scp_rep_waitq,
- !cfs_list_empty(&svcpt->scp_rep_idle), &lwi);
+ !list_empty(&svcpt->scp_rep_idle), &lwi);
if (rc != 0)
goto out;
spin_lock(&svcpt->scp_rep_lock);
}
- rs = cfs_list_entry(svcpt->scp_rep_idle.next,
+ rs = list_entry(svcpt->scp_rep_idle.next,
struct ptlrpc_reply_state, rs_list);
- cfs_list_del(&rs->rs_list);
+ list_del(&rs->rs_list);
spin_unlock(&svcpt->scp_rep_lock);
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
spin_lock(&svcpt->scp_rep_lock);
- cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
+ list_add(&rs->rs_list, &svcpt->scp_rep_idle);
spin_unlock(&svcpt->scp_rep_lock);
wake_up(&svcpt->scp_rep_waitq);
}
rs->rs_cb_id.cbid_fn = reply_out_callback;
rs->rs_cb_id.cbid_arg = rs;
rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
- CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
- CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
- CFS_INIT_LIST_HEAD(&rs->rs_list);
+ INIT_LIST_HEAD(&rs->rs_exp_list);
+ INIT_LIST_HEAD(&rs->rs_obd_list);
+ INIT_LIST_HEAD(&rs->rs_list);
spin_lock_init(&rs->rs_lock);
req->rq_replen = msg_len;
LASSERT(!rs->rs_scheduled);
LASSERT(rs->rs_export == NULL);
LASSERT(rs->rs_nlocks == 0);
- LASSERT(cfs_list_empty(&rs->rs_exp_list));
- LASSERT(cfs_list_empty(&rs->rs_obd_list));
+ LASSERT(list_empty(&rs->rs_exp_list));
+ LASSERT(list_empty(&rs->rs_obd_list));
sptlrpc_svc_free_rs(rs);
}
CFS_MODULE_PARM(suppress_pings, "i", int, 0644, "Suppress pings");
struct mutex pinger_mutex;
-static CFS_LIST_HEAD(pinger_imports);
-static cfs_list_t timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
+static LIST_HEAD(pinger_imports);
+static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
int ptlrpc_pinger_suppress_pings()
{
/* The timeout list is a increase order sorted list */
mutex_lock(&pinger_mutex);
- cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
+ list_for_each_entry(item, &timeout_list, ti_chain) {
int ti_timeout = item->ti_timeout;
if (timeout > ti_timeout)
timeout = ti_timeout;
struct l_wait_info lwi;
cfs_duration_t time_to_next_wake;
struct timeout_item *item;
- cfs_list_t *iter;
+ struct list_head *iter;
mutex_lock(&pinger_mutex);
- cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
+ list_for_each_entry(item, &timeout_list, ti_chain)
item->ti_cb(item, item->ti_cb_data);
- }
- cfs_list_for_each(iter, &pinger_imports) {
- struct obd_import *imp =
- cfs_list_entry(iter, struct obd_import,
- imp_pinger_chain);
+
+ list_for_each(iter, &pinger_imports) {
+ struct obd_import *imp = list_entry(iter,
+ struct obd_import,
+ imp_pinger_chain);
ptlrpc_pinger_process_import(imp, this_ping);
/* obd_timeout might have changed */
int ptlrpc_pinger_add_import(struct obd_import *imp)
{
ENTRY;
- if (!cfs_list_empty(&imp->imp_pinger_chain))
+ if (!list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
mutex_lock(&pinger_mutex);
imp->imp_obd->obd_no_recov = 0;
ptlrpc_update_next_ping(imp, 0);
/* XXX sort, blah blah */
- cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+ list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
class_import_get(imp);
ptlrpc_pinger_wake_up();
int ptlrpc_pinger_del_import(struct obd_import *imp)
{
- ENTRY;
- if (cfs_list_empty(&imp->imp_pinger_chain))
- RETURN(-ENOENT);
+ ENTRY;
+
+ if (list_empty(&imp->imp_pinger_chain))
+ RETURN(-ENOENT);
mutex_lock(&pinger_mutex);
- cfs_list_del_init(&imp->imp_pinger_chain);
- CDEBUG(D_HA, "removing pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- /* if we remove from pinger we don't want recovery on this import */
- imp->imp_obd->obd_no_recov = 1;
- class_import_put(imp);
+ list_del_init(&imp->imp_pinger_chain);
+ CDEBUG(D_HA, "removing pingable import %s->%s\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
+ /* if we remove from pinger we don't want recovery on this import */
+ imp->imp_obd->obd_no_recov = 1;
+ class_import_put(imp);
mutex_unlock(&pinger_mutex);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_pinger_del_import);
if (!ti)
return(NULL);
- CFS_INIT_LIST_HEAD(&ti->ti_obd_list);
- CFS_INIT_LIST_HEAD(&ti->ti_chain);
+ INIT_LIST_HEAD(&ti->ti_obd_list);
+ INIT_LIST_HEAD(&ti->ti_chain);
ti->ti_timeout = time;
ti->ti_event = event;
ti->ti_cb = cb;
LASSERT(mutex_is_locked(&pinger_mutex));
- cfs_list_for_each_entry(item, &timeout_list, ti_chain)
+ list_for_each_entry(item, &timeout_list, ti_chain)
if (item->ti_event == event)
goto out;
- item = ptlrpc_new_timeout(time, event, cb, data);
- if (item) {
- cfs_list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
- if (tmp->ti_timeout < time) {
- cfs_list_add(&item->ti_chain, &tmp->ti_chain);
- goto out;
- }
- }
- cfs_list_add(&item->ti_chain, &timeout_list);
- }
+ item = ptlrpc_new_timeout(time, event, cb, data);
+ if (item) {
+ list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
+ if (tmp->ti_timeout < time) {
+ list_add(&item->ti_chain, &tmp->ti_chain);
+ goto out;
+ }
+ }
+ list_add(&item->ti_chain, &timeout_list);
+ }
out:
- return item;
+ return item;
}
/* Add a client_obd to the timeout event list, when timeout(@time)
*/
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
timeout_cb_t cb, void *data,
- cfs_list_t *obd_list)
+ struct list_head *obd_list)
{
struct timeout_item *ti;
mutex_unlock(&pinger_mutex);
return (-EINVAL);
}
- cfs_list_add(obd_list, &ti->ti_obd_list);
+ list_add(obd_list, &ti->ti_obd_list);
mutex_unlock(&pinger_mutex);
return 0;
}
EXPORT_SYMBOL(ptlrpc_add_timeout_client);
-int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
- enum timeout_event event)
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
+ enum timeout_event event)
{
- struct timeout_item *ti = NULL, *item;
+ struct timeout_item *ti = NULL, *item;
- if (cfs_list_empty(obd_list))
- return 0;
+ if (list_empty(obd_list))
+ return 0;
mutex_lock(&pinger_mutex);
- cfs_list_del_init(obd_list);
- /**
- * If there are no obd attached to the timeout event
- * list, remove this timeout event from the pinger
- */
- cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
- if (item->ti_event == event) {
- ti = item;
- break;
- }
- }
- LASSERTF(ti != NULL, "ti is NULL ! \n");
- if (cfs_list_empty(&ti->ti_obd_list)) {
- cfs_list_del(&ti->ti_chain);
- OBD_FREE_PTR(ti);
- }
+ list_del_init(obd_list);
+ /**
+ * If there are no obd attached to the timeout event
+ * list, remove this timeout event from the pinger
+ */
+ list_for_each_entry(item, &timeout_list, ti_chain) {
+ if (item->ti_event == event) {
+ ti = item;
+ break;
+ }
+ }
+ LASSERTF(ti != NULL, "ti is NULL !\n");
+ if (list_empty(&ti->ti_obd_list)) {
+ list_del(&ti->ti_chain);
+ OBD_FREE_PTR(ti);
+ }
mutex_unlock(&pinger_mutex);
- return 0;
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_del_timeout_client);
struct timeout_item *item, *tmp;
mutex_lock(&pinger_mutex);
- cfs_list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
- LASSERT(cfs_list_empty(&item->ti_obd_list));
- cfs_list_del(&item->ti_chain);
+ list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
+ LASSERT(list_empty(&item->ti_obd_list));
+ list_del(&item->ti_chain);
OBD_FREE_PTR(item);
}
mutex_unlock(&pinger_mutex);
static int pet_refcount = 0;
static int pet_state;
-static wait_queue_head_t pet_waitq;
-CFS_LIST_HEAD(pet_list);
+static wait_queue_head_t pet_waitq;
+LIST_HEAD(pet_list);
static DEFINE_SPINLOCK(pet_lock);
int ping_evictor_wake(struct obd_export *exp)
}
obd = class_exp2obd(exp);
- if (cfs_list_empty(&obd->obd_evict_list)) {
+ if (list_empty(&obd->obd_evict_list)) {
class_incref(obd, "evictor", obd);
- cfs_list_add(&obd->obd_evict_list, &pet_list);
+ list_add(&obd->obd_evict_list, &pet_list);
}
spin_unlock(&pet_lock);
unshare_fs_struct();
- CDEBUG(D_HA, "Starting Ping Evictor\n");
- pet_state = PET_READY;
- while (1) {
- l_wait_event(pet_waitq, (!cfs_list_empty(&pet_list)) ||
- (pet_state == PET_TERMINATE), &lwi);
+ CDEBUG(D_HA, "Starting Ping Evictor\n");
+ pet_state = PET_READY;
+ while (1) {
+ l_wait_event(pet_waitq, (!list_empty(&pet_list)) ||
+ (pet_state == PET_TERMINATE), &lwi);
- /* loop until all obd's will be removed */
- if ((pet_state == PET_TERMINATE) && cfs_list_empty(&pet_list))
- break;
+ /* loop until all obd's will be removed */
+ if ((pet_state == PET_TERMINATE) && list_empty(&pet_list))
+ break;
- /* we only get here if pet_exp != NULL, and the end of this
- * loop is the only place which sets it NULL again, so lock
- * is not strictly necessary. */
+ /* we only get here if pet_exp != NULL, and the end of this
+ * loop is the only place which sets it NULL again, so lock
+ * is not strictly necessary. */
spin_lock(&pet_lock);
- obd = cfs_list_entry(pet_list.next, struct obd_device,
- obd_evict_list);
+ obd = list_entry(pet_list.next, struct obd_device,
+ obd_evict_list);
spin_unlock(&pet_lock);
- expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
+ expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
- CDEBUG(D_HA, "evicting all exports of obd %s older than %ld\n",
- obd->obd_name, expire_time);
+ CDEBUG(D_HA, "evicting all exports of obd %s older than %ld\n",
+ obd->obd_name, expire_time);
- /* Exports can't be deleted out of the list while we hold
- * the obd lock (class_unlink_export), which means we can't
- * lose the last ref on the export. If they've already been
- * removed from the list, we won't find them here. */
+ /* Exports can't be deleted out of the list while we hold
+ * the obd lock (class_unlink_export), which means we can't
+ * lose the last ref on the export. If they've already been
+ * removed from the list, we won't find them here. */
spin_lock(&obd->obd_dev_lock);
- while (!cfs_list_empty(&obd->obd_exports_timed)) {
- exp = cfs_list_entry(obd->obd_exports_timed.next,
- struct obd_export,
- exp_obd_chain_timed);
+ while (!list_empty(&obd->obd_exports_timed)) {
+ exp = list_entry(obd->obd_exports_timed.next,
+ struct obd_export,
+ exp_obd_chain_timed);
if (expire_time > exp->exp_last_request_time) {
class_export_get(exp);
spin_unlock(&obd->obd_dev_lock);
spin_unlock(&obd->obd_dev_lock);
spin_lock(&pet_lock);
- cfs_list_del_init(&obd->obd_evict_list);
+ list_del_init(&obd->obd_evict_list);
spin_unlock(&pet_lock);
class_decref(obd, "evictor", obd);
if (++pet_refcount > 1)
return;
+ INIT_LIST_HEAD(&pet_list);
init_waitqueue_head(&pet_waitq);
task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
cfs_time_t curtime = cfs_time_current();
struct ptlrpc_request *req;
struct ptlrpc_request_set *set;
- cfs_list_t *iter;
+ struct list_head *iter;
struct obd_import *imp;
struct pinger_data *pd = &pinger_args;
int rc;
/* add rpcs into set */
mutex_lock(&pinger_mutex);
- cfs_list_for_each(iter, &pinger_imports) {
- struct obd_import *imp = cfs_list_entry(iter, struct obd_import,
+ list_for_each(iter, &pinger_imports) {
+ struct obd_import *imp = list_entry(iter, struct obd_import,
imp_pinger_chain);
int generation, level;
if (atomic_read(&set->set_remaining) == 0)
CDEBUG(D_RPCTRACE, "nothing to ping\n");
- cfs_list_for_each(iter, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(iter, struct ptlrpc_request,
- rq_set_chain);
- DEBUG_REQ(D_RPCTRACE, req, "pinging %s->%s",
- req->rq_import->imp_obd->obd_uuid.uuid,
- obd2cli_tgt(req->rq_import->imp_obd));
- (void)ptl_send_rpc(req, 0);
- }
+ list_for_each(iter, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(iter, struct ptlrpc_request,
+ rq_set_chain);
+ DEBUG_REQ(D_RPCTRACE, req, "pinging %s->%s",
+ req->rq_import->imp_obd->obd_uuid.uuid,
+ obd2cli_tgt(req->rq_import->imp_obd));
+ (void)ptl_send_rpc(req, 0);
+ }
do_check_set:
rc = ptlrpc_check_set(NULL, set);
return 0;
}
- /* Expire all the requests that didn't come back. */
+ /* Expire all the requests that didn't come back. */
mutex_lock(&pinger_mutex);
- cfs_list_for_each(iter, &set->set_requests) {
- req = cfs_list_entry(iter, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each(iter, &set->set_requests) {
+ req = list_entry(iter, struct ptlrpc_request,
+ rq_set_chain);
if (req->rq_phase == RQ_PHASE_COMPLETE)
continue;
/* This will also unregister reply. */
ptlrpc_expire_one_request(req, 0);
- /* We're done with this req, let's finally move it to complete
- * phase and take care of inflights. */
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- imp = req->rq_import;
+ /* We're done with this req, let's finally move it to complete
+ * phase and take care of inflights. */
+ ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
+ imp = req->rq_import;
spin_lock(&imp->imp_lock);
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
atomic_dec(&imp->imp_inflight);
}
spin_unlock(&imp->imp_lock);
}
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- cfs_list_t *obd_list)
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list)
{
- return 0;
+ return 0;
}
-int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
- enum timeout_event event)
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
+ enum timeout_event event)
{
- return 0;
+ return 0;
}
int ptlrpc_pinger_add_import(struct obd_import *imp)
{
- ENTRY;
- if (!cfs_list_empty(&imp->imp_pinger_chain))
- RETURN(-EALREADY);
+ ENTRY;
- CDEBUG(D_HA, "adding pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- ptlrpc_pinger_sending_on_import(imp);
+ if (!list_empty(&imp->imp_pinger_chain))
+ RETURN(-EALREADY);
+
+ CDEBUG(D_HA, "adding pingable import %s->%s\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
+ ptlrpc_pinger_sending_on_import(imp);
mutex_lock(&pinger_mutex);
- cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
- class_import_get(imp);
+ list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+ class_import_get(imp);
mutex_unlock(&pinger_mutex);
- RETURN(0);
+ RETURN(0);
}
int ptlrpc_pinger_del_import(struct obd_import *imp)
{
- ENTRY;
- if (cfs_list_empty(&imp->imp_pinger_chain))
- RETURN(-ENOENT);
+ ENTRY;
+
+ if (list_empty(&imp->imp_pinger_chain))
+ RETURN(-ENOENT);
mutex_lock(&pinger_mutex);
- cfs_list_del_init(&imp->imp_pinger_chain);
- CDEBUG(D_HA, "removing pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- class_import_put(imp);
+ list_del_init(&imp->imp_pinger_chain);
+ CDEBUG(D_HA, "removing pingable import %s->%s\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
+ class_import_put(imp);
mutex_unlock(&pinger_mutex);
- RETURN(0);
+ RETURN(0);
}
void ptlrpc_pinger_wake_up()
/* XXX force pinger to run, if needed */
struct obd_import *imp;
ENTRY;
- cfs_list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
+ list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
CDEBUG(D_RPCTRACE, "checking import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
#ifdef ENABLE_LIBLUSTRE_RECOVERY
struct ldlm_res_id;
struct ptlrpc_request_set;
extern int test_req_buffer_pressure;
+extern struct list_head ptlrpc_all_services;
extern struct mutex ptlrpc_all_services_mutex;
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
#if RS_DEBUG
spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
+ INIT_LIST_HEAD(&ptlrpc_all_services);
mutex_init(&ptlrpc_all_services_mutex);
mutex_init(&pinger_mutex);
mutex_init(&ptlrpcd_mutex);
*/
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
#ifdef __KERNEL__
struct ptlrpcd_ctl *pc;
struct ptlrpc_request_set *new;
new = pc->pc_set;
#endif
- cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
req->rq_set = new;
req->rq_queued_time = cfs_time_current();
#else
- cfs_list_del_init(&req->rq_set_chain);
+ list_del_init(&req->rq_set_chain);
req->rq_set = NULL;
ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
atomic_dec(&set->set_remaining);
#ifdef __KERNEL__
spin_lock(&new->set_new_req_lock);
- cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
+ list_splice_init(&set->set_requests, &new->set_new_requests);
i = atomic_read(&set->set_remaining);
count = atomic_add_return(i, &new->set_new_count);
atomic_set(&set->set_remaining, 0);
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
struct ptlrpc_request_set *src)
{
- cfs_list_t *tmp, *pos;
- struct ptlrpc_request *req;
- int rc = 0;
+ struct list_head *tmp, *pos;
+ struct ptlrpc_request *req;
+ int rc = 0;
spin_lock(&src->set_new_req_lock);
- if (likely(!cfs_list_empty(&src->set_new_requests))) {
- cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
- req = cfs_list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
- req->rq_set = des;
- }
- cfs_list_splice_init(&src->set_new_requests,
- &des->set_requests);
+ if (likely(!list_empty(&src->set_new_requests))) {
+ list_for_each_safe(pos, tmp, &src->set_new_requests) {
+ req = list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
+ req->rq_set = des;
+ }
+ list_splice_init(&src->set_new_requests,
+ &des->set_requests);
rc = atomic_read(&src->set_new_count);
atomic_add(rc, &des->set_remaining);
atomic_set(&src->set_new_count, 0);
*/
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
struct ptlrpc_request *req;
struct ptlrpc_request_set *set = pc->pc_set;
int rc = 0;
if (atomic_read(&set->set_new_count)) {
spin_lock(&set->set_new_req_lock);
- if (likely(!cfs_list_empty(&set->set_new_requests))) {
- cfs_list_splice_init(&set->set_new_requests,
+ if (likely(!list_empty(&set->set_new_requests))) {
+ list_splice_init(&set->set_new_requests,
&set->set_requests);
atomic_add(atomic_read(&set->set_new_count),
&set->set_remaining);
if (atomic_read(&set->set_remaining))
rc |= ptlrpc_check_set(env, set);
- if (!cfs_list_empty(&set->set_requests)) {
- /*
- * XXX: our set never completes, so we prune the completed
- * reqs after each iteration. boy could this be smarter.
- */
- cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
- req = cfs_list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
- if (req->rq_phase != RQ_PHASE_COMPLETE)
- continue;
-
- cfs_list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
- ptlrpc_req_finished(req);
- }
- }
+ if (!list_empty(&set->set_requests)) {
+ /*
+ * XXX: our set never completes, so we prune the completed
+ * reqs after each iteration. boy could this be smarter.
+ */
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ req = list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
+ if (req->rq_phase != RQ_PHASE_COMPLETE)
+ continue;
+
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+ ptlrpc_req_finished(req);
+ }
+ }
if (rc == 0) {
/*
/*
* Wait for inflight requests to drain.
*/
- if (!cfs_list_empty(&set->set_requests))
+ if (!list_empty(&set->set_requests))
ptlrpc_set_wait(set);
lu_context_fini(&env.le_ctx);
lu_context_fini(env.le_ses);
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
int rc = 0;
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
struct ptlrpc_request *req = NULL;
__u64 last_transno;
ENTRY;
*/
/* Replay all the committed open requests on committed_list first */
- if (!cfs_list_empty(&imp->imp_committed_list)) {
+ if (!list_empty(&imp->imp_committed_list)) {
tmp = imp->imp_committed_list.prev;
- req = cfs_list_entry(tmp, struct ptlrpc_request,
+ req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
/* The last request on committed_list hasn't been replayed */
while (imp->imp_replay_cursor !=
&imp->imp_committed_list) {
- req = cfs_list_entry(imp->imp_replay_cursor,
+ req = list_entry(imp->imp_replay_cursor,
struct ptlrpc_request,
rq_replay_list);
if (req->rq_transno > last_transno)
/* All the requests in committed list have been replayed, let's replay
* the imp_replay_list */
if (req == NULL) {
- cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
+ list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+ req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
if (req->rq_transno > last_transno)
RETURN(-1);
}
- cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
+ list_for_each_entry_safe(req, next, &imp->imp_sending_list,
rq_list) {
LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
"req %p bad\n", req);
*/
void ptlrpc_wake_delayed(struct obd_import *imp)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
struct ptlrpc_request *req;
spin_lock(&imp->imp_lock);
- cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
ptlrpc_client_wake_req(req);
struct ptlrpc_request *req, *next;
spin_lock(&ctx->cc_lock);
- cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+ list_for_each_entry_safe(req, next, &ctx->cc_req_list,
rq_ctx_chain) {
- cfs_list_del_init(&req->rq_ctx_chain);
+ list_del_init(&req->rq_ctx_chain);
ptlrpc_client_wake_req(req);
}
spin_unlock(&ctx->cc_lock);
/* request might be asked to release earlier while still
* in the context waiting list.
*/
- if (!cfs_list_empty(&req->rq_ctx_chain)) {
+ if (!list_empty(&req->rq_ctx_chain)) {
spin_lock(&req->rq_cli_ctx->cc_lock);
- cfs_list_del_init(&req->rq_ctx_chain);
+ list_del_init(&req->rq_ctx_chain);
spin_unlock(&req->rq_cli_ctx->cc_lock);
}
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
spin_lock(&ctx->cc_lock);
- if (!cfs_list_empty(&req->rq_ctx_chain))
- cfs_list_del_init(&req->rq_ctx_chain);
+ if (!list_empty(&req->rq_ctx_chain))
+ list_del_init(&req->rq_ctx_chain);
spin_unlock(&ctx->cc_lock);
}
* waiting list
*/
spin_lock(&ctx->cc_lock);
- if (cfs_list_empty(&req->rq_ctx_chain))
- cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+ if (list_empty(&req->rq_ctx_chain))
+ list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
spin_unlock(&ctx->cc_lock);
if (timeout < 0)
spin_lock_init(&req->rq_lock);
atomic_set(&req->rq_refcount, 10000);
- CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
+ INIT_LIST_HEAD(&req->rq_ctx_chain);
init_waitqueue_head(&req->rq_reply_waitq);
init_waitqueue_head(&req->rq_set_waitq);
req->rq_import = imp;
req->rq_cli_ctx = ctx;
rc = sptlrpc_req_refresh_ctx(req, 0);
- LASSERT(cfs_list_empty(&req->rq_ctx_chain));
+ LASSERT(list_empty(&req->rq_ctx_chain));
sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
ptlrpc_request_cache_free(req);
spin_lock(&obd->obd_dev_lock);
- cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
if (exp->exp_connection == NULL)
continue;
**********************************/
struct sptlrpc_conf_tgt {
- cfs_list_t sct_list;
+ struct list_head sct_list;
char sct_name[MAX_OBD_NAME];
struct sptlrpc_rule_set sct_rset;
};
struct sptlrpc_conf {
- cfs_list_t sc_list;
- char sc_fsname[MTI_NAME_MAXLEN];
- unsigned int sc_modified; /* modified during updating */
- unsigned int sc_updated:1, /* updated copy from MGS */
- sc_local:1; /* local copy from target */
- struct sptlrpc_rule_set sc_rset; /* fs general rules */
- cfs_list_t sc_tgts; /* target-specific rules */
+ struct list_head sc_list;
+ char sc_fsname[MTI_NAME_MAXLEN];
+ unsigned int sc_modified; /* modified during updating */
+ unsigned int sc_updated:1, /* updated copy from MGS */
+ sc_local:1; /* local copy from target */
+ struct sptlrpc_rule_set sc_rset; /* fs general rules */
+ struct list_head sc_tgts; /* target-specific rules */
};
static struct mutex sptlrpc_conf_lock;
-static CFS_LIST_HEAD(sptlrpc_confs);
+static LIST_HEAD(sptlrpc_confs);
static inline int is_hex(char c)
{
static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
{
- struct sptlrpc_conf_tgt *conf_tgt, *conf_tgt_next;
+ struct sptlrpc_conf_tgt *conf_tgt, *conf_tgt_next;
- sptlrpc_rule_set_free(&conf->sc_rset);
+ sptlrpc_rule_set_free(&conf->sc_rset);
- cfs_list_for_each_entry_safe(conf_tgt, conf_tgt_next,
- &conf->sc_tgts, sct_list) {
- sptlrpc_rule_set_free(&conf_tgt->sct_rset);
- cfs_list_del(&conf_tgt->sct_list);
- OBD_FREE_PTR(conf_tgt);
- }
- LASSERT(cfs_list_empty(&conf->sc_tgts));
+ list_for_each_entry_safe(conf_tgt, conf_tgt_next,
+ &conf->sc_tgts, sct_list) {
+ sptlrpc_rule_set_free(&conf_tgt->sct_rset);
+ list_del(&conf_tgt->sct_list);
+ OBD_FREE_PTR(conf_tgt);
+ }
+ LASSERT(list_empty(&conf->sc_tgts));
- conf->sc_updated = 0;
- conf->sc_local = 0;
+ conf->sc_updated = 0;
+ conf->sc_local = 0;
}
static void sptlrpc_conf_free(struct sptlrpc_conf *conf)
{
- CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
+ CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
- sptlrpc_conf_free_rsets(conf);
- cfs_list_del(&conf->sc_list);
- OBD_FREE_PTR(conf);
+ sptlrpc_conf_free_rsets(conf);
+ list_del(&conf->sc_list);
+ OBD_FREE_PTR(conf);
}
static
{
struct sptlrpc_conf_tgt *conf_tgt;
- cfs_list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+ list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
if (strcmp(conf_tgt->sct_name, name) == 0)
return conf_tgt;
}
if (conf_tgt) {
strlcpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name));
sptlrpc_rule_set_init(&conf_tgt->sct_rset);
- cfs_list_add(&conf_tgt->sct_list, &conf->sc_tgts);
+ list_add(&conf_tgt->sct_list, &conf->sc_tgts);
}
return conf_tgt;
{
struct sptlrpc_conf *conf;
- cfs_list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
+ list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
if (strcmp(conf->sc_fsname, fsname) == 0)
return conf;
}
return NULL;
}
sptlrpc_rule_set_init(&conf->sc_rset);
- CFS_INIT_LIST_HEAD(&conf->sc_tgts);
- cfs_list_add(&conf->sc_list, &sptlrpc_confs);
+ INIT_LIST_HEAD(&conf->sc_tgts);
+ list_add(&conf->sc_list, &sptlrpc_confs);
CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname);
return conf;
sptlrpc_record_rule_set(llh, conf->sc_fsname, &conf->sc_rset);
- cfs_list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+ list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
sptlrpc_record_rule_set(llh, conf_tgt->sct_name,
&conf_tgt->sct_rset);
}
int sptlrpc_conf_init(void)
{
+ INIT_LIST_HEAD(&sptlrpc_confs);
mutex_init(&sptlrpc_conf_lock);
- return 0;
+ return 0;
}
void sptlrpc_conf_fini(void)
{
- struct sptlrpc_conf *conf, *conf_next;
+ struct sptlrpc_conf *conf, *conf_next;
mutex_lock(&sptlrpc_conf_lock);
- cfs_list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
- sptlrpc_conf_free(conf);
- }
- LASSERT(cfs_list_empty(&sptlrpc_confs));
+ list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list)
+ sptlrpc_conf_free(conf);
+ LASSERT(list_empty(&sptlrpc_confs));
mutex_unlock(&sptlrpc_conf_lock);
}
#ifdef __KERNEL__
static struct mutex sec_gc_mutex;
-static CFS_LIST_HEAD(sec_gc_list);
static spinlock_t sec_gc_list_lock;
+static LIST_HEAD(sec_gc_list);
-static CFS_LIST_HEAD(sec_gc_ctx_list);
static spinlock_t sec_gc_ctx_list_lock;
+static LIST_HEAD(sec_gc_ctx_list);
static struct ptlrpc_thread sec_gc_thread;
static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
{
LASSERT(sec->ps_policy->sp_cops->gc_ctx);
LASSERT(sec->ps_gc_interval > 0);
- LASSERT(cfs_list_empty(&sec->ps_gc_list));
+ LASSERT(list_empty(&sec->ps_gc_list));
sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
spin_lock(&sec_gc_list_lock);
- cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+ list_add_tail(&sec_gc_list, &sec->ps_gc_list);
spin_unlock(&sec_gc_list_lock);
CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
- if (cfs_list_empty(&sec->ps_gc_list))
+ if (list_empty(&sec->ps_gc_list))
return;
might_sleep();
atomic_inc(&sec_gc_wait_del);
spin_lock(&sec_gc_list_lock);
- cfs_list_del_init(&sec->ps_gc_list);
+ list_del_init(&sec->ps_gc_list);
spin_unlock(&sec_gc_list_lock);
/* barrier */
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
+ LASSERT(list_empty(&ctx->cc_gc_chain));
CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
spin_lock(&sec_gc_ctx_list_lock);
- cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+ list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
spin_unlock(&sec_gc_ctx_list_lock);
thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
spin_lock(&sec_gc_ctx_list_lock);
- while (!cfs_list_empty(&sec_gc_ctx_list)) {
- ctx = cfs_list_entry(sec_gc_ctx_list.next,
+ while (!list_empty(&sec_gc_ctx_list)) {
+ ctx = list_entry(sec_gc_ctx_list.next,
struct ptlrpc_cli_ctx, cc_gc_chain);
- cfs_list_del_init(&ctx->cc_gc_chain);
+ list_del_init(&ctx->cc_gc_chain);
spin_unlock(&sec_gc_ctx_list_lock);
LASSERT(ctx->cc_sec);
* another issue here is we wakeup as fixed interval instead of
* according to each sec's expiry time */
mutex_lock(&sec_gc_mutex);
- cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+ list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
/* if someone is waiting to be deleted, let it
* proceed as soon as possible. */
if (atomic_read(&sec_gc_wait_del)) {
spin_lock_init(&sec_gc_list_lock);
spin_lock_init(&sec_gc_ctx_list_lock);
+ INIT_LIST_HEAD(&sec_gc_list);
+ INIT_LIST_HEAD(&sec_gc_ctx_list);
+
/* initialize thread control */
memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
init_waitqueue_head(&sec_gc_thread.t_ctl_waitq);
static void null_init_internal(void)
{
- static CFS_HLIST_HEAD(__list);
+ static HLIST_HEAD(__list);
null_sec.ps_policy = &null_policy;
atomic_set(&null_sec.ps_refcount, 1); /* always busy */
null_sec.ps_dying = 0;
spin_lock_init(&null_sec.ps_lock);
atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
- CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
+ INIT_LIST_HEAD(&null_sec.ps_gc_list);
null_sec.ps_gc_interval = 0;
null_sec.ps_gc_next = 0;
- cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
+ hlist_add_head(&null_cli_ctx.cc_cache, &__list);
atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
null_cli_ctx.cc_sec = &null_sec;
null_cli_ctx.cc_ops = &null_ctx_ops;
PTLRPC_CTX_UPTODATE;
null_cli_ctx.cc_vcred.vc_uid = 0;
spin_lock_init(&null_cli_ctx.cc_lock);
- CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
- CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
+ INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+ INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
int sptlrpc_null_init(void)
ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
ctx->cc_vcred.vc_uid = 0;
spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
plsec->pls_ctx = ctx;
atomic_inc(&plsec->pls_base.ps_nctx);
sec->ps_import = class_import_get(imp);
sec->ps_flvr = *sf;
spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
sec->ps_gc_interval = 0;
sec->ps_gc_next = 0;
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
/** Holds a list of all PTLRPC services */
-CFS_LIST_HEAD(ptlrpc_all_services);
+LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;
rqbd->rqbd_refcount = 0;
rqbd->rqbd_cbid.cbid_fn = request_in_callback;
rqbd->rqbd_cbid.cbid_arg = rqbd;
- CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
+ INIT_LIST_HEAD(&rqbd->rqbd_reqs);
OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
svcpt->scp_cpt, svc->srv_buf_size);
if (rqbd->rqbd_buffer == NULL) {
}
spin_lock(&svcpt->scp_lock);
- cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+ list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
svcpt->scp_nrqbds_total++;
spin_unlock(&svcpt->scp_lock);
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
LASSERT(rqbd->rqbd_refcount == 0);
- LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));
+ LASSERT(list_empty(&rqbd->rqbd_reqs));
spin_lock(&svcpt->scp_lock);
- cfs_list_del(&rqbd->rqbd_list);
+ list_del(&rqbd->rqbd_list);
svcpt->scp_nrqbds_total--;
spin_unlock(&svcpt->scp_lock);
int hrt_id; /* thread ID */
spinlock_t hrt_lock;
wait_queue_head_t hrt_waitq;
- cfs_list_t hrt_queue; /* RS queue */
+ struct list_head hrt_queue; /* RS queue */
struct ptlrpc_hr_partition *hrt_partition;
};
};
struct rs_batch {
- cfs_list_t rsb_replies;
+ struct list_head rsb_replies;
unsigned int rsb_n_replies;
struct ptlrpc_service_part *rsb_svcpt;
};
*/
static void rs_batch_init(struct rs_batch *b)
{
- memset(b, 0, sizeof *b);
- CFS_INIT_LIST_HEAD(&b->rsb_replies);
+ memset(b, 0, sizeof *b);
+ INIT_LIST_HEAD(&b->rsb_replies);
}
/**
hrt = ptlrpc_hr_select(b->rsb_svcpt);
spin_lock(&hrt->hrt_lock);
- cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
+ list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
wake_up(&hrt->hrt_waitq);
spin_lock(&rs->rs_lock);
rs->rs_scheduled_ever = 1;
if (rs->rs_scheduled == 0) {
- cfs_list_move(&rs->rs_list, &b->rsb_replies);
+ list_move(&rs->rs_list, &b->rsb_replies);
rs->rs_scheduled = 1;
b->rsb_n_replies++;
}
struct ptlrpc_hr_thread *hrt;
ENTRY;
- LASSERT(cfs_list_empty(&rs->rs_list));
+ LASSERT(list_empty(&rs->rs_list));
hrt = ptlrpc_hr_select(rs->rs_svcpt);
spin_lock(&hrt->hrt_lock);
- cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
+ list_add_tail(&rs->rs_list, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
wake_up(&hrt->hrt_waitq);
EXIT;
#else
- cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
+ list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
#endif
}
}
rs->rs_scheduled = 1;
- cfs_list_del_init(&rs->rs_list);
+ list_del_init(&rs->rs_list);
ptlrpc_dispatch_difficult_reply(rs);
EXIT;
}
/* CAVEAT EMPTOR: spinlock ordering!!! */
spin_lock(&exp->exp_uncommitted_replies_lock);
- cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
+ list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
rs_obd_list) {
LASSERT (rs->rs_difficult);
/* VBR: per-export last_committed */
LASSERT(rs->rs_export);
if (rs->rs_transno <= exp->exp_last_committed) {
- cfs_list_del_init(&rs->rs_obd_list);
+ list_del_init(&rs->rs_obd_list);
rs_batch_add(&batch, rs);
}
}
for (;;) {
spin_lock(&svcpt->scp_lock);
- if (cfs_list_empty(&svcpt->scp_rqbd_idle)) {
+ if (list_empty(&svcpt->scp_rqbd_idle)) {
spin_unlock(&svcpt->scp_lock);
return posted;
}
- rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
+ rqbd = list_entry(svcpt->scp_rqbd_idle.next,
struct ptlrpc_request_buffer_desc,
rqbd_list);
- cfs_list_del(&rqbd->rqbd_list);
+ list_del(&rqbd->rqbd_list);
/* assume we will post successfully */
svcpt->scp_nrqbds_posted++;
- cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
+ list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
spin_unlock(&svcpt->scp_lock);
spin_lock(&svcpt->scp_lock);
svcpt->scp_nrqbds_posted--;
- cfs_list_del(&rqbd->rqbd_list);
- cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+ list_del(&rqbd->rqbd_list);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
/* Don't complain if no request buffers are posted right now; LNET
* won't drop requests because we set the portal lazy! */
int rc;
svcpt->scp_cpt = cpt;
- CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
+ INIT_LIST_HEAD(&svcpt->scp_threads);
/* rqbd and incoming request queue */
spin_lock_init(&svcpt->scp_lock);
- CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
- CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
- CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
+ INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
+ INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
+ INIT_LIST_HEAD(&svcpt->scp_req_incoming);
init_waitqueue_head(&svcpt->scp_waitq);
/* history request & rqbd list */
- CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
- CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
+ INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
+ INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
/* acitve requests and hp requests */
spin_lock_init(&svcpt->scp_req_lock);
/* reply states */
spin_lock_init(&svcpt->scp_rep_lock);
- CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
+ INIT_LIST_HEAD(&svcpt->scp_rep_active);
#ifndef __KERNEL__
- CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
+ INIT_LIST_HEAD(&svcpt->scp_rep_queue);
#endif
- CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
+ INIT_LIST_HEAD(&svcpt->scp_rep_idle);
init_waitqueue_head(&svcpt->scp_rep_waitq);
atomic_set(&svcpt->scp_nreps_difficult, 0);
/* allocate memory for scp_at_array (ptlrpc_at_array) */
OBD_CPT_ALLOC(array->paa_reqs_array,
- svc->srv_cptable, cpt, sizeof(cfs_list_t) * size);
+ svc->srv_cptable, cpt, sizeof(struct list_head) * size);
if (array->paa_reqs_array == NULL)
return -ENOMEM;
for (index = 0; index < size; index++)
- CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
+ INIT_LIST_HEAD(&array->paa_reqs_array[index]);
OBD_CPT_ALLOC(array->paa_reqs_count,
svc->srv_cptable, cpt, sizeof(__u32) * size);
if (array->paa_reqs_array != NULL) {
OBD_FREE(array->paa_reqs_array,
- sizeof(cfs_list_t) * array->paa_size);
+ sizeof(struct list_head) * array->paa_size);
array->paa_reqs_array = NULL;
}
spin_lock_init(&service->srv_lock);
service->srv_name = conf->psc_name;
service->srv_watchdog_factor = conf->psc_watchdog_factor;
- CFS_INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */
+ INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */
/* buffer configuration */
service->srv_nbuf_per_group = test_req_buffer_pressure ?
LASSERT(rc == 0);
mutex_lock(&ptlrpc_all_services_mutex);
- cfs_list_add (&service->srv_list, &ptlrpc_all_services);
+ list_add(&service->srv_list, &ptlrpc_all_services);
mutex_unlock(&ptlrpc_all_services_mutex);
if (proc_entry != NULL)
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
LASSERT(atomic_read(&req->rq_refcount) == 0);
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(list_empty(&req->rq_timed_list));
/* DEBUG_REQ() assumes the reply state of a request with a valid
* ref will not be destroyed until that reference is dropped. */
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
int refcount;
- cfs_list_t *tmp;
- cfs_list_t *nxt;
+ struct list_head *tmp;
+ struct list_head *nxt;
if (!atomic_dec_and_test(&req->rq_refcount))
return;
spin_unlock(&svcpt->scp_at_lock);
}
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(list_empty(&req->rq_timed_list));
- /* finalize request */
- if (req->rq_export) {
- class_export_put(req->rq_export);
- req->rq_export = NULL;
- }
+ /* finalize request */
+ if (req->rq_export) {
+ class_export_put(req->rq_export);
+ req->rq_export = NULL;
+ }
spin_lock(&svcpt->scp_lock);
- cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
+ list_add(&req->rq_list, &rqbd->rqbd_reqs);
- refcount = --(rqbd->rqbd_refcount);
- if (refcount == 0) {
- /* request buffer is now idle: add to history */
- cfs_list_del(&rqbd->rqbd_list);
+ refcount = --(rqbd->rqbd_refcount);
+ if (refcount == 0) {
+ /* request buffer is now idle: add to history */
+ list_del(&rqbd->rqbd_list);
- cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
svcpt->scp_hist_nrqbds++;
/* cull some history?
* I expect only about 1 or 2 rqbds need to be recycled here */
while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
- rqbd = cfs_list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ rqbd = list_entry(svcpt->scp_hist_rqbds.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
- cfs_list_del(&rqbd->rqbd_list);
+ list_del(&rqbd->rqbd_list);
svcpt->scp_hist_nrqbds--;
- /* remove rqbd's reqs from svc's req history while
- * I've got the service lock */
- cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_list);
- /* Track the highest culled req seq */
+ /* remove rqbd's reqs from svc's req history while
+ * I've got the service lock */
+ list_for_each(tmp, &rqbd->rqbd_reqs) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_list);
+ /* Track the highest culled req seq */
if (req->rq_history_seq >
svcpt->scp_hist_seq_culled) {
svcpt->scp_hist_seq_culled =
req->rq_history_seq;
}
- cfs_list_del(&req->rq_history_list);
+ list_del(&req->rq_history_list);
}
spin_unlock(&svcpt->scp_lock);
- cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
- req = cfs_list_entry(rqbd->rqbd_reqs.next,
- struct ptlrpc_request,
- rq_list);
- cfs_list_del(&req->rq_list);
- ptlrpc_server_free_request(req);
- }
+ list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
+ req = list_entry(rqbd->rqbd_reqs.next,
+ struct ptlrpc_request,
+ rq_list);
+ list_del(&req->rq_list);
+ ptlrpc_server_free_request(req);
+ }
spin_lock(&svcpt->scp_lock);
/*
* now all reqs including the embedded req has been
* disposed, schedule request buffer for re-use.
*/
- LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
- 0);
- cfs_list_add_tail(&rqbd->rqbd_list,
- &svcpt->scp_rqbd_idle);
+ LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
}
spin_unlock(&svcpt->scp_lock);
} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
/* If we are low on memory, we are not interested in history */
- cfs_list_del(&req->rq_list);
- cfs_list_del_init(&req->rq_history_list);
+ list_del(&req->rq_list);
+ list_del_init(&req->rq_history_list);
/* Track the highest culled req seq */
if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
exp->exp_last_request_time = new_time;
- /* exports may get disconnected from the chain even though the
- export has references, so we must keep the spin lock while
- manipulating the lists */
+ /* exports may get disconnected from the chain even though the
+ export has references, so we must keep the spin lock while
+ manipulating the lists */
spin_lock(&exp->exp_obd->obd_dev_lock);
- if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
+ if (list_empty(&exp->exp_obd_chain_timed)) {
/* this one is not timed */
spin_unlock(&exp->exp_obd->obd_dev_lock);
- RETURN_EXIT;
- }
+ RETURN_EXIT;
+ }
- cfs_list_move_tail(&exp->exp_obd_chain_timed,
- &exp->exp_obd->obd_exports_timed);
+ list_move_tail(&exp->exp_obd_chain_timed,
+ &exp->exp_obd->obd_exports_timed);
- oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
- struct obd_export, exp_obd_chain_timed);
- oldest_time = oldest_exp->exp_last_request_time;
+ oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
+ struct obd_export, exp_obd_chain_timed);
+ oldest_time = oldest_exp->exp_last_request_time;
spin_unlock(&exp->exp_obd->obd_dev_lock);
if (exp->exp_obd->obd_recovering) {
return(-ENOSYS);
spin_lock(&svcpt->scp_at_lock);
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(list_empty(&req->rq_timed_list));
index = (unsigned long)req->rq_deadline % array->paa_size;
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
* so search backward. */
- cfs_list_for_each_entry_reverse(rq,
+ list_for_each_entry_reverse(rq,
&array->paa_reqs_array[index],
rq_timed_list) {
if (req->rq_deadline >= rq->rq_deadline) {
- cfs_list_add(&req->rq_timed_list,
+ list_add(&req->rq_timed_list,
&rq->rq_timed_list);
break;
}
}
/* Add the request at the head of the list */
- if (cfs_list_empty(&req->rq_timed_list))
- cfs_list_add(&req->rq_timed_list,
+ if (list_empty(&req->rq_timed_list))
+ list_add(&req->rq_timed_list,
&array->paa_reqs_array[index]);
spin_lock(&req->rq_lock);
array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
/* NB: must call with hold svcpt::scp_at_lock */
- LASSERT(!cfs_list_empty(&req->rq_timed_list));
- cfs_list_del_init(&req->rq_timed_list);
+ LASSERT(!list_empty(&req->rq_timed_list));
+ list_del_init(&req->rq_timed_list);
spin_lock(&req->rq_lock);
req->rq_at_linked = 0;
{
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq, *n;
- cfs_list_t work_list;
+ struct list_head work_list;
__u32 index, count;
time_t deadline;
time_t now = cfs_time_current_sec();
RETURN(0);
}
- /* We're close to a timeout, and we don't know how much longer the
- server will take. Send early replies to everyone expiring soon. */
- CFS_INIT_LIST_HEAD(&work_list);
- deadline = -1;
- index = (unsigned long)array->paa_deadline % array->paa_size;
- count = array->paa_count;
- while (count > 0) {
- count -= array->paa_reqs_count[index];
- cfs_list_for_each_entry_safe(rq, n,
- &array->paa_reqs_array[index],
- rq_timed_list) {
+ /* We're close to a timeout, and we don't know how much longer the
+ server will take. Send early replies to everyone expiring soon. */
+ INIT_LIST_HEAD(&work_list);
+ deadline = -1;
+ index = (unsigned long)array->paa_deadline % array->paa_size;
+ count = array->paa_count;
+ while (count > 0) {
+ count -= array->paa_reqs_count[index];
+ list_for_each_entry_safe(rq, n,
+ &array->paa_reqs_array[index],
+ rq_timed_list) {
if (rq->rq_deadline > now + at_early_margin) {
/* update the earliest deadline */
if (deadline == -1 ||
* don't add entry to work_list
*/
if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
- cfs_list_add(&rq->rq_timed_list, &work_list);
+ list_add(&rq->rq_timed_list, &work_list);
counter++;
}
/* we took additional refcount so entries can't be deleted from list, no
* locking is needed */
- while (!cfs_list_empty(&work_list)) {
- rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
+ while (!list_empty(&work_list)) {
+ rq = list_entry(work_list.next, struct ptlrpc_request,
rq_timed_list);
- cfs_list_del_init(&rq->rq_timed_list);
+ list_del_init(&rq->rq_timed_list);
if (ptlrpc_at_send_early_reply(rq) == 0)
ptlrpc_at_add_timed(rq);
* flights on the client, so it is not all that long.
* Also we only hit this codepath in case of a resent
* request which makes it even more rarely hit */
- cfs_list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
+ list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
rq_exp_list) {
/* Found duplicate one */
if (tmp->rq_xid == req->rq_xid)
goto found;
}
- cfs_list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
+ list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
rq_exp_list) {
/* Found duplicate one */
if (tmp->rq_xid == req->rq_xid)
static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
- cfs_list_t *list;
+ struct list_head *list;
int rc, hp = 0;
ENTRY;
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
RETURN(rc);
}
- cfs_list_add(&req->rq_exp_list, list);
+ list_add(&req->rq_exp_list, list);
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
req->rq_ops->hpreq_fini(req);
spin_lock_bh(&req->rq_export->exp_rpc_lock);
- cfs_list_del_init(&req->rq_exp_list);
+ list_del_init(&req->rq_exp_list);
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
EXIT;
ENTRY;
spin_lock(&svcpt->scp_lock);
- if (cfs_list_empty(&svcpt->scp_req_incoming)) {
+ if (list_empty(&svcpt->scp_req_incoming)) {
spin_unlock(&svcpt->scp_lock);
RETURN(0);
}
- req = cfs_list_entry(svcpt->scp_req_incoming.next,
+ req = list_entry(svcpt->scp_req_incoming.next,
struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
+ list_del_init(&req->rq_list);
svcpt->scp_nreqs_incoming--;
/* Consider this still a "queued" request as far as stats are
* concerned */
int been_handled;
ENTRY;
- exp = rs->rs_export;
+ exp = rs->rs_export;
- LASSERT (rs->rs_difficult);
- LASSERT (rs->rs_scheduled);
- LASSERT (cfs_list_empty(&rs->rs_list));
+ LASSERT(rs->rs_difficult);
+ LASSERT(rs->rs_scheduled);
+ LASSERT(list_empty(&rs->rs_list));
spin_lock(&exp->exp_lock);
/* Noop if removed already */
- cfs_list_del_init (&rs->rs_exp_list);
+ list_del_init(&rs->rs_exp_list);
spin_unlock(&exp->exp_lock);
/* The disk commit callback holds exp_uncommitted_replies_lock while it
*/
if (!rs->rs_committed) {
spin_lock(&exp->exp_uncommitted_replies_lock);
- cfs_list_del_init(&rs->rs_obd_list);
+ list_del_init(&rs->rs_obd_list);
spin_unlock(&exp->exp_uncommitted_replies_lock);
}
ENTRY;
spin_lock(&svcpt->scp_rep_lock);
- if (!cfs_list_empty(&svcpt->scp_rep_queue)) {
- rs = cfs_list_entry(svcpt->scp_rep_queue.prev,
+ if (!list_empty(&svcpt->scp_rep_queue)) {
+ rs = list_entry(svcpt->scp_rep_queue.prev,
struct ptlrpc_reply_state,
rs_list);
- cfs_list_del_init(&rs->rs_list);
+ list_del_init(&rs->rs_list);
}
spin_unlock(&svcpt->scp_rep_lock);
if (rs != NULL)
int
liblustre_check_services (void *arg)
{
- int did_something = 0;
- int rc;
- cfs_list_t *tmp, *nxt;
- ENTRY;
+ int did_something = 0;
+ int rc;
+ struct list_head *tmp, *nxt;
+ ENTRY;
- /* I'm relying on being single threaded, not to have to lock
- * ptlrpc_all_services etc */
- cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
- struct ptlrpc_service *svc =
- cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
+ /* I'm relying on being single threaded, not to have to lock
+ * ptlrpc_all_services etc */
+ list_for_each_safe(tmp, nxt, &ptlrpc_all_services) {
+ struct ptlrpc_service *svc =
+ list_entry(tmp, struct ptlrpc_service, srv_list);
struct ptlrpc_service_part *svcpt;
LASSERT(svc->srv_ncpts == 1);
static inline int
ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
{
- return !cfs_list_empty(&svcpt->scp_rqbd_idle) &&
+ return !list_empty(&svcpt->scp_rqbd_idle) &&
svcpt->scp_rqbd_timeout == 0;
}
static inline int
ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
{
- return !cfs_list_empty(&svcpt->scp_req_incoming);
+ return !list_empty(&svcpt->scp_req_incoming);
}
static __attribute__((__noinline__)) int
env->le_ctx.lc_thread = thread;
env->le_ctx.lc_cookie = 0x6;
- while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
+ while (!list_empty(&svcpt->scp_rqbd_idle)) {
rc = ptlrpc_server_post_idle_rqbds(svcpt);
if (rc >= 0)
continue;
NULL, NULL);
spin_lock(&svcpt->scp_rep_lock);
- cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
+ list_add(&rs->rs_list, &svcpt->scp_rep_idle);
wake_up(&svcpt->scp_rep_waitq);
spin_unlock(&svcpt->scp_rep_lock);
}
static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
- cfs_list_t *replies)
+ struct list_head *replies)
{
int result;
spin_lock(&hrt->hrt_lock);
- cfs_list_splice_init(&hrt->hrt_queue, replies);
- result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies);
+ list_splice_init(&hrt->hrt_queue, replies);
+ result = ptlrpc_hr.hr_stopping || !list_empty(replies);
spin_unlock(&hrt->hrt_lock);
return result;
{
struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg;
struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
- CFS_LIST_HEAD (replies);
+ struct list_head replies;
char threadname[20];
int rc;
+ INIT_LIST_HEAD(&replies);
snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
hrp->hrp_cpt, hrt->hrt_id);
unshare_fs_struct();
while (!ptlrpc_hr.hr_stopping) {
l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
- while (!cfs_list_empty(&replies)) {
- struct ptlrpc_reply_state *rs;
+ while (!list_empty(&replies)) {
+ struct ptlrpc_reply_state *rs;
- rs = cfs_list_entry(replies.prev,
- struct ptlrpc_reply_state,
- rs_list);
- cfs_list_del_init(&rs->rs_list);
- ptlrpc_handle_rs(rs);
- }
- }
+ rs = list_entry(replies.prev,
+ struct ptlrpc_reply_state,
+ rs_list);
+ list_del_init(&rs->rs_list);
+ ptlrpc_handle_rs(rs);
+ }
+ }
atomic_inc(&hrp->hrp_nstopped);
wake_up(&ptlrpc_hr.hr_waitq);
{
struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
- CFS_LIST_HEAD (zombie);
+ struct list_head zombie;
ENTRY;
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
+ INIT_LIST_HEAD(&zombie);
spin_lock(&svcpt->scp_lock);
/* let the thread know that we would like it to stop asap */
list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
wake_up_all(&svcpt->scp_waitq);
- while (!cfs_list_empty(&svcpt->scp_threads)) {
- thread = cfs_list_entry(svcpt->scp_threads.next,
+ while (!list_empty(&svcpt->scp_threads)) {
+ thread = list_entry(svcpt->scp_threads.next,
struct ptlrpc_thread, t_link);
if (thread_is_stopped(thread)) {
- cfs_list_del(&thread->t_link);
- cfs_list_add(&thread->t_link, &zombie);
+ list_del(&thread->t_link);
+ list_add(&thread->t_link, &zombie);
continue;
}
spin_unlock(&svcpt->scp_lock);
spin_unlock(&svcpt->scp_lock);
- while (!cfs_list_empty(&zombie)) {
- thread = cfs_list_entry(zombie.next,
+ while (!list_empty(&zombie)) {
+ thread = list_entry(zombie.next,
struct ptlrpc_thread, t_link);
- cfs_list_del(&thread->t_link);
+ list_del(&thread->t_link);
OBD_FREE_PTR(thread);
}
EXIT;
thread_add_flags(thread, SVC_STARTING);
thread->t_svcpt = svcpt;
- cfs_list_add(&thread->t_link, &svcpt->scp_threads);
+ list_add(&thread->t_link, &svcpt->scp_threads);
spin_unlock(&svcpt->scp_lock);
if (svcpt->scp_cpt >= 0) {
wake_up(&thread->t_ctl_waitq);
spin_unlock(&svcpt->scp_lock);
} else {
- cfs_list_del(&thread->t_link);
+ list_del(&thread->t_link);
spin_unlock(&svcpt->scp_lock);
OBD_FREE_PTR(thread);
}
hrt->hrt_partition = hrp;
init_waitqueue_head(&hrt->hrt_waitq);
spin_lock_init(&hrt->hrt_lock);
- CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
+ INIT_LIST_HEAD(&hrt->hrt_queue);
}
}
/* Unlink all the request buffers. This forces a 'final'
* event with its 'unlink' flag set for each posted rqbd */
- cfs_list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
+ list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
rqbd_list) {
rc = LNetMDUnlink(rqbd->rqbd_md_h);
LASSERT(rc == 0 || rc == -ENOENT);
break;
spin_lock(&svcpt->scp_rep_lock);
- while (!cfs_list_empty(&svcpt->scp_rep_active)) {
- rs = cfs_list_entry(svcpt->scp_rep_active.next,
+ while (!list_empty(&svcpt->scp_rep_active)) {
+ rs = list_entry(svcpt->scp_rep_active.next,
struct ptlrpc_reply_state, rs_list);
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
/* purge the request queue. NB No new replies (rqbds
* all unlinked) and no service threads, so I'm the only
* thread noodling the request queue now */
- while (!cfs_list_empty(&svcpt->scp_req_incoming)) {
- req = cfs_list_entry(svcpt->scp_req_incoming.next,
+ while (!list_empty(&svcpt->scp_req_incoming)) {
+ req = list_entry(svcpt->scp_req_incoming.next,
struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
+ list_del(&req->rq_list);
svcpt->scp_nreqs_incoming--;
ptlrpc_server_finish_request(svcpt, req);
}
ptlrpc_server_finish_active_request(svcpt, req);
}
- LASSERT(cfs_list_empty(&svcpt->scp_rqbd_posted));
+ LASSERT(list_empty(&svcpt->scp_rqbd_posted));
LASSERT(svcpt->scp_nreqs_incoming == 0);
LASSERT(svcpt->scp_nreqs_active == 0);
/* history should have been culled by
/* Now free all the request buffers since nothing
* references them any more... */
- while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
- rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
+ while (!list_empty(&svcpt->scp_rqbd_idle)) {
+ rqbd = list_entry(svcpt->scp_rqbd_idle.next,
struct ptlrpc_request_buffer_desc,
rqbd_list);
ptlrpc_free_rqbd(rqbd);
}
ptlrpc_wait_replies(svcpt);
- while (!cfs_list_empty(&svcpt->scp_rep_idle)) {
- rs = cfs_list_entry(svcpt->scp_rep_idle.next,
+ while (!list_empty(&svcpt->scp_rep_idle)) {
+ rs = list_entry(svcpt->scp_rep_idle.next,
struct ptlrpc_reply_state,
rs_list);
- cfs_list_del(&rs->rs_list);
+ list_del(&rs->rs_list);
OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
}
}
if (array->paa_reqs_array != NULL) {
OBD_FREE(array->paa_reqs_array,
- sizeof(cfs_list_t) * array->paa_size);
+ sizeof(struct list_head) * array->paa_size);
array->paa_reqs_array = NULL;
}
service->srv_is_stopping = 1;
mutex_lock(&ptlrpc_all_services_mutex);
- cfs_list_del_init(&service->srv_list);
+ list_del_init(&service->srv_list);
mutex_unlock(&ptlrpc_all_services_mutex);
ptlrpc_service_del_atimer(service);