ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
struct ldlm_extent *new_ex)
{
- cfs_list_t *tmp;
- struct ldlm_resource *res = req->l_resource;
- ldlm_mode_t req_mode = req->l_req_mode;
- __u64 req_start = req->l_req_extent.start;
- __u64 req_end = req->l_req_extent.end;
- int conflicting = 0;
- ENTRY;
-
- lockmode_verify(req_mode);
+ struct ldlm_resource *res = req->l_resource;
+ ldlm_mode_t req_mode = req->l_req_mode;
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+ struct ldlm_lock *lock;
+ int conflicting = 0;
+ ENTRY;
- /* for waiting locks */
- cfs_list_for_each(tmp, &res->lr_waiting) {
- struct ldlm_lock *lock;
- struct ldlm_extent *l_extent;
+ lockmode_verify(req_mode);
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
- l_extent = &lock->l_policy_data.l_extent;
+ /* for waiting locks */
+ list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
+ struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
- /* We already hit the minimum requested size, search no more */
- if (new_ex->start == req_start && new_ex->end == req_end) {
- EXIT;
- return;
- }
+ /* We already hit the minimum requested size, search no more */
+ if (new_ex->start == req_start && new_ex->end == req_end) {
+ EXIT;
+ return;
+ }
/* Don't conflict with ourselves */
if (req == lock)
continue;
}
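/*
 * The conversion this patch applies throughout: the libcfs wrappers
 * cfs_list_t, cfs_list_for_each() and cfs_list_entry() become the native
 * kernel struct list_head, list_for_each_entry() and list_entry().
 * list_for_each_entry() hides both the cursor and the container_of()
 * arithmetic, which is why the explicit "tmp" pointer and the separate
 * entry lookup drop out of the loops above.  A minimal sketch of the
 * before/after shape (struct item, head and consume() are hypothetical):
 *
 *	struct item { struct list_head link; int val; };
 *
 *	cfs_list_t *tmp;			// before
 *	cfs_list_for_each(tmp, &head) {
 *		struct item *it = cfs_list_entry(tmp, struct item, link);
 *		consume(it);
 *	}
 *
 *	struct item *it;			// after
 *	list_for_each_entry(it, &head, link)
 *		consume(it);
 */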
struct ldlm_extent_compat_args {
- cfs_list_t *work_list;
+ struct list_head *work_list;
struct ldlm_lock *lock;
ldlm_mode_t mode;
int *locks;
struct ldlm_extent_compat_args *priv = data;
struct ldlm_interval *node = to_ldlm_interval(n);
struct ldlm_extent *extent;
- cfs_list_t *work_list = priv->work_list;
+ struct list_head *work_list = priv->work_list;
struct ldlm_lock *lock, *enq = priv->lock;
ldlm_mode_t mode = priv->mode;
int count = 0;
ENTRY;
- LASSERT(!cfs_list_empty(&node->li_group));
+ LASSERT(!list_empty(&node->li_group));
- cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ list_for_each_entry(lock, &node->li_group, l_sl_policy) {
/* interval tree is for granted lock */
LASSERTF(mode == lock->l_granted_mode,
"mode = %s, lock->l_granted_mode = %s\n",
* \retval negative error, such as EWOULDBLOCK for group locks
*/
static int
-ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
__u64 *flags, ldlm_error_t *err,
- cfs_list_t *work_list, int *contended_locks)
+ struct list_head *work_list, int *contended_locks)
{
- cfs_list_t *tmp;
- struct ldlm_lock *lock;
- struct ldlm_resource *res = req->l_resource;
- ldlm_mode_t req_mode = req->l_req_mode;
- __u64 req_start = req->l_req_extent.start;
- __u64 req_end = req->l_req_extent.end;
- int compat = 1;
- int scan = 0;
- int check_contention;
- ENTRY;
+ struct ldlm_resource *res = req->l_resource;
+ ldlm_mode_t req_mode = req->l_req_mode;
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+ struct ldlm_lock *lock;
+ int check_contention;
+ int compat = 1;
+ int scan = 0;
+ ENTRY;
lockmode_verify(req_mode);
} else {
interval_search(tree->lit_root, &ex,
ldlm_extent_compat_cb, &data);
- if (!cfs_list_empty(work_list) && compat)
+ if (!list_empty(work_list) && compat)
compat = 0;
}
}
} else { /* for waiting queue */
- cfs_list_for_each(tmp, queue) {
+ list_for_each_entry(lock, queue, l_res_link) {
check_contention = 1;
- lock = cfs_list_entry(tmp, struct ldlm_lock,
- l_res_link);
-
/* We stop walking the queue if we hit ourselves so
* we don't take conflicting locks enqueued after us
* into account, or we'd wait forever. */
* front of first non-GROUP lock */
ldlm_resource_insert_lock_after(lock, req);
- cfs_list_del_init(&lock->l_res_link);
+ list_del_init(&lock->l_res_link);
ldlm_resource_insert_lock_after(req, lock);
compat = 0;
break;
first non-GROUP lock */
ldlm_resource_insert_lock_after(lock, req);
- cfs_list_del_init(&lock->l_res_link);
+ list_del_init(&lock->l_res_link);
ldlm_resource_insert_lock_after(req, lock);
break;
}
RETURN(compat);
destroylock:
- cfs_list_del_init(&req->l_res_link);
+ list_del_init(&req->l_res_link);
ldlm_lock_destroy_nolock(req);
*err = compat;
RETURN(compat);
* If for whatever reason we do not want to send ASTs to conflicting locks
* anymore, disassemble the list with this function.
*/
-static void discard_bl_list(cfs_list_t *bl_list)
+static void discard_bl_list(struct list_head *bl_list)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
ENTRY;
- cfs_list_for_each_safe(pos, tmp, bl_list) {
+ list_for_each_safe(pos, tmp, bl_list) {
struct ldlm_lock *lock =
- cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
+ list_entry(pos, struct ldlm_lock, l_bl_ast);
- cfs_list_del_init(&lock->l_bl_ast);
+ list_del_init(&lock->l_bl_ast);
LASSERT(ldlm_is_ast_sent(lock));
ldlm_clear_ast_sent(lock);
LASSERT(lock->l_bl_ast_run == 0);
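/*
 * discard_bl_list() unlinks entries while walking, so it needs the _safe
 * iterator: list_for_each_safe() saves pos->next in "tmp" before the body
 * runs, so the walk survives list_del_init(pos).  A plain list_for_each()
 * here would chase a pointer that list_del_init() has just redirected.
 * Minimal sketch of the pattern (struct item and head are hypothetical):
 *
 *	struct list_head *pos, *tmp;
 *	list_for_each_safe(pos, tmp, &head) {
 *		struct item *it = list_entry(pos, struct item, link);
 *		list_del_init(&it->link);	// safe: next was saved
 *	}
 */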
*/
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
- struct ldlm_resource *res = lock->l_resource;
- CFS_LIST_HEAD(rpc_list);
- int rc, rc2;
- int contended_locks = 0;
- ENTRY;
+ struct ldlm_resource *res = lock->l_resource;
+ struct list_head rpc_list;
+ int rc, rc2;
+ int contended_locks = 0;
+ ENTRY;
LASSERT(lock->l_granted_mode != lock->l_req_mode);
- LASSERT(cfs_list_empty(&res->lr_converting));
- LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
+ LASSERT(list_empty(&res->lr_converting));
+ LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
!ldlm_is_ast_discard_data(lock));
- check_res_locked(res);
- *err = ELDLM_OK;
+ INIT_LIST_HEAD(&rpc_list);
+ check_res_locked(res);
+ *err = ELDLM_OK;
if (!first_enq) {
/* Careful observers will note that we don't handle -EWOULDBLOCK
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (cfs_list_empty(&lock->l_res_link))
+ if (list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
}
RETURN(0);
out:
- if (!cfs_list_empty(&rpc_list)) {
+ if (!list_empty(&rpc_list)) {
LASSERT(!ldlm_is_ast_discard_data(lock));
discard_bl_list(&rpc_list);
}
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
ENTRY;
* calculation of the kms */
ldlm_set_kms_ignore(lock);
- cfs_list_for_each(tmp, &res->lr_granted) {
- lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each(tmp, &res->lr_granted) {
+ lck = list_entry(tmp, struct ldlm_lock, l_res_link);
if (ldlm_is_kms_ignore(lck))
continue;
if (node == NULL)
RETURN(NULL);
- CFS_INIT_LIST_HEAD(&node->li_group);
+ INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
RETURN(node);
}
void ldlm_interval_free(struct ldlm_interval *node)
{
if (node) {
- LASSERT(cfs_list_empty(&node->li_group));
+ LASSERT(list_empty(&node->li_group));
LASSERT(!interval_is_intree(&node->li_node));
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
}
LASSERT(l->l_tree_node == NULL);
LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
- cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
+ list_add_tail(&l->l_sl_policy, &n->li_group);
l->l_tree_node = n;
}
if (n == NULL)
return NULL;
- LASSERT(!cfs_list_empty(&n->li_group));
+ LASSERT(!list_empty(&n->li_group));
l->l_tree_node = NULL;
- cfs_list_del_init(&l->l_sl_policy);
+ list_del_init(&l->l_sl_policy);
- return (cfs_list_empty(&n->li_group) ? n : NULL);
+ return list_empty(&n->li_group) ? n : NULL;
}
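/*
 * The list_empty() tests above are valid because an initialized node
 * points back at itself: INIT_LIST_HEAD() sets next == prev == self, and
 * list_del_init() restores that state on removal, so the same node can be
 * tested and deleted again without poisoning.  This is also why the
 * on-stack CFS_LIST_HEAD(name) declarations elsewhere in the patch turn
 * into a plain declaration plus an explicit INIT_LIST_HEAD():
 *
 *	struct list_head rpc_list;
 *	INIT_LIST_HEAD(&rpc_list);	// next == prev == &rpc_list
 *	LASSERT(list_empty(&rpc_list));	// true until the first list_add()
 */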
static inline int lock_mode_to_index(ldlm_mode_t mode)
if (req->l_export == NULL)
return;
- LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));
+ LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
req->l_policy_data.l_flock.blocking_owner =
lock->l_policy_data.l_flock.owner;
check_res_locked(req->l_resource);
if (req->l_export->exp_flock_hash != NULL &&
- !cfs_hlist_unhashed(&req->l_exp_flock_hash))
+ !hlist_unhashed(&req->l_exp_flock_hash))
cfs_hash_del(req->l_export->exp_flock_hash,
&req->l_policy_data.l_flock.owner,
&req->l_exp_flock_hash);
mode, flags);
/* Safe to not lock here, since it should be empty anyway */
- LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
+ LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
- cfs_list_del_init(&lock->l_res_link);
+ list_del_init(&lock->l_res_link);
if (flags == LDLM_FL_WAIT_NOREPROC) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
};
static int ldlm_flock_lookup_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
struct ldlm_flock_lookup_cb_data *cb_data = data;
struct obd_export *exp = cfs_hash_object(hs, hnode);
}
static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
*/
int
ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list)
+ ldlm_error_t *err, struct list_head *work_list)
{
struct ldlm_resource *res = req->l_resource;
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- cfs_list_t *tmp;
- cfs_list_t *ownlocks = NULL;
+ struct list_head *tmp;
+ struct list_head *ownlocks = NULL;
struct ldlm_lock *lock = NULL;
struct ldlm_lock *new = req;
struct ldlm_lock *new2 = NULL;
if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
/* This loop determines where this processes locks start
* in the resource lr_granted list. */
- cfs_list_for_each(tmp, &res->lr_granted) {
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ list_for_each(tmp, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
ownlocks = tmp;
/* This loop determines if there are existing locks
* that conflict with the new lock request. */
- cfs_list_for_each(tmp, &res->lr_granted) {
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ list_for_each(tmp, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
ownlocks = &res->lr_granted;
list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
- lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
+ lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
if (!ldlm_same_flock_owner(lock, new))
break;
if (lock->l_export != NULL) {
new2->l_export = class_export_lock_get(lock->l_export, new2);
if (new2->l_export->exp_lock_hash &&
- cfs_hlist_unhashed(&new2->l_exp_hash))
+ hlist_unhashed(&new2->l_exp_hash))
cfs_hash_add(new2->l_export->exp_lock_hash,
&new2->l_remote_handle,
&new2->l_exp_hash);
/* Add req to the granted queue before calling ldlm_reprocess_all(). */
if (!added) {
- cfs_list_del_init(&req->l_res_link);
+ list_del_init(&req->l_res_link);
/* insert new lock before ownlocks in list. */
ldlm_resource_add_lock(res, ownlocks, req);
}
* note that ldlm_process_flock_lock() will recurse,
* but only once because first_enq will be false from
* ldlm_reprocess_queue. */
- if ((mode == LCK_NL) && overlaps) {
- CFS_LIST_HEAD(rpc_list);
+ if ((mode == LCK_NL) && overlaps) {
+ struct list_head rpc_list;
int rc;
+
+ INIT_LIST_HEAD(&rpc_list);
restart:
ldlm_reprocess_queue(res, &res->lr_waiting,
&rpc_list);
}
static void *
-ldlm_export_flock_key(cfs_hlist_node_t *hnode)
+ldlm_export_flock_key(struct hlist_node *hnode)
{
struct ldlm_lock *lock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
return &lock->l_policy_data.l_flock.owner;
}
static int
-ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
{
return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
}
static void *
-ldlm_export_flock_object(cfs_hlist_node_t *hnode)
+ldlm_export_flock_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
}
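/*
 * The export hash callbacks switch cfs_hlist_node_t to struct hlist_node.
 * hlist is the hash-bucket flavour of the kernel list: the bucket head is
 * a single pointer (half the size of list_head, no O(1) tail access), but
 * hlist_entry() is the same container_of() idiom as list_entry().  A
 * minimal sketch with a hypothetical struct item keyed the same way:
 *
 *	struct item { struct hlist_node hnode; __u64 key; };
 *
 *	static void *item_key(struct hlist_node *hnode)
 *	{
 *		return &hlist_entry(hnode, struct item, hnode)->key;
 *	}
 */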
static void
-ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
struct ldlm_flock *flock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
LDLM_LOCK_GET(lock);
flock = &lock->l_policy_data.l_flock;
}
static void
-ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
struct ldlm_flock *flock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
LDLM_LOCK_RELEASE(lock);
flock = &lock->l_policy_data.l_flock;
* locks if first lock of the bunch is not conflicting with us.
*/
static int
-ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
- cfs_list_t *work_list)
+ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ struct list_head *work_list)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct ldlm_lock *lock;
ldlm_mode_t req_mode = req->l_req_mode;
__u64 req_bits = req->l_policy_data.l_inodebits.bits;
I think. Also such a lock would be compatible
with any other bit lock */
- cfs_list_for_each(tmp, queue) {
- cfs_list_t *mode_tail;
+ list_for_each(tmp, queue) {
+ struct list_head *mode_tail;
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
/* We stop walking the queue if we hit ourselves so we don't
* take conflicting locks enqueued after us into account,
/* last lock in mode group */
LASSERT(lock->l_sl_mode.prev != NULL);
- mode_tail = &cfs_list_entry(lock->l_sl_mode.prev,
+ mode_tail = &list_entry(lock->l_sl_mode.prev,
struct ldlm_lock,
l_sl_mode)->l_res_link;
}
for (;;) {
- cfs_list_t *head;
+ struct list_head *head;
/* Advance loop cursor to last lock in policy group. */
- tmp = &cfs_list_entry(lock->l_sl_policy.prev,
+ tmp = &list_entry(lock->l_sl_policy.prev,
struct ldlm_lock,
l_sl_policy)->l_res_link;
ldlm_add_ast_work_item(lock, req,
work_list);
head = &lock->l_sl_policy;
- cfs_list_for_each_entry(lock, head, l_sl_policy)
+ list_for_each_entry(lock, head, l_sl_policy)
if (lock->l_blocking_ast)
ldlm_add_ast_work_item(lock, req,
work_list);
break;
tmp = tmp->next;
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
} /* Loop over policy groups within one mode group. */
} /* Loop over mode groups within @queue. */
*/
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
- struct ldlm_resource *res = lock->l_resource;
- CFS_LIST_HEAD(rpc_list);
- int rc;
- ENTRY;
+ struct ldlm_resource *res = lock->l_resource;
+ struct list_head rpc_list;
+ int rc;
+ ENTRY;
LASSERT(lock->l_granted_mode != lock->l_req_mode);
- LASSERT(cfs_list_empty(&res->lr_converting));
- check_res_locked(res);
+ LASSERT(list_empty(&res->lr_converting));
+ INIT_LIST_HEAD(&rpc_list);
+ check_res_locked(res);
/* (*flags & LDLM_FL_BLOCK_NOWAIT) is for layout lock right now. */
if (!first_enq || (*flags & LDLM_FL_BLOCK_NOWAIT)) {
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (cfs_list_empty(&lock->l_res_link))
+ if (list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
extern int ldlm_srv_namespace_nr;
extern int ldlm_cli_namespace_nr;
extern struct mutex ldlm_srv_namespace_lock;
-extern cfs_list_t ldlm_srv_namespace_list;
+extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;
-extern cfs_list_t ldlm_cli_active_namespace_list;
-extern cfs_list_t ldlm_cli_inactive_namespace_list;
+extern struct list_head ldlm_cli_active_namespace_list;
+extern struct list_head ldlm_cli_inactive_namespace_list;
static inline int ldlm_namespace_nr_read(ldlm_side_t client)
{
ldlm_cli_namespace_nr--;
}
-static inline cfs_list_t *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
}
-static inline cfs_list_t *ldlm_namespace_inactive_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
ldlm_cancel_flags_t sync, int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
- cfs_list_t *cancels, int count, int max,
+ struct list_head *cancels, int count, int max,
ldlm_cancel_flags_t cancel_flags, int flags);
extern int ldlm_enqueue_min;
int ldlm_get_enq_timeout(struct ldlm_lock *lock);
struct ptlrpc_request_set *set;
int type; /* LDLM_{CP,BL,GL}_CALLBACK */
atomic_t restart;
- cfs_list_t *list;
+ struct list_head *list;
union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */
};
LDLM_WORK_GL_AST
} ldlm_desc_ast_t;
-void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list);
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
enum req_location loc, void *data, int size);
struct ldlm_lock *
void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- cfs_list_t *work_list);
+ struct list_head *work_list);
#ifdef HAVE_SERVER_SUPPORT
-int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
- cfs_list_t *work_list);
+int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
+ struct list_head *work_list);
#endif
-int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
ldlm_desc_ast_t ast_type);
int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
struct ldlm_lock *lock);
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
- cfs_list_t *cancels, int count,
+ struct list_head *cancels, int count,
ldlm_cancel_flags_t cancel_flags);
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
/* ldlm_plain.c */
int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list);
+ struct list_head *work_list);
/* ldlm_inodebits.c */
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list);
+ struct list_head *work_list);
#endif
/* ldlm_extent.c */
#ifdef HAVE_SERVER_SUPPORT
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list);
+ struct list_head *work_list);
#endif
void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
/* ldlm_flock.c */
int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list);
+ struct list_head *work_list);
int ldlm_init_flock_export(struct obd_export *exp);
void ldlm_destroy_flock_export(struct obd_export *exp);
ldlm_interval_extent(struct ldlm_interval *node)
{
struct ldlm_lock *lock;
- LASSERT(!cfs_list_empty(&node->li_group));
+ LASSERT(!list_empty(&node->li_group));
- lock = cfs_list_entry(node->li_group.next, struct ldlm_lock,
+ lock = list_entry(node->li_group.next, struct ldlm_lock,
l_sl_policy);
return &lock->l_policy_data.l_extent;
}
}
spin_lock(&imp->imp_lock);
- cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
+ list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
if (obd_uuid_equals(uuid, &item->oic_uuid)) {
if (priority) {
- cfs_list_del(&item->oic_item);
- cfs_list_add(&item->oic_item,
+ list_del(&item->oic_item);
+ list_add(&item->oic_item,
&imp->imp_conn_list);
item->oic_last_attempt = 0;
}
imp_conn->oic_uuid = *uuid;
imp_conn->oic_last_attempt = 0;
if (priority)
- cfs_list_add(&imp_conn->oic_item, &imp->imp_conn_list);
+ list_add(&imp_conn->oic_item, &imp->imp_conn_list);
else
- cfs_list_add_tail(&imp_conn->oic_item,
+ list_add_tail(&imp_conn->oic_item,
&imp->imp_conn_list);
CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
ENTRY;
spin_lock(&imp->imp_lock);
- if (cfs_list_empty(&imp->imp_conn_list)) {
+ if (list_empty(&imp->imp_conn_list)) {
LASSERT(!imp->imp_connection);
GOTO(out, rc);
}
- cfs_list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
+ list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
continue;
LASSERT(imp_conn->oic_conn);
}
}
- cfs_list_del(&imp_conn->oic_item);
+ list_del(&imp_conn->oic_item);
ptlrpc_connection_put(imp_conn->oic_conn);
OBD_FREE(imp_conn, sizeof(*imp_conn));
CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
ENTRY;
spin_lock(&imp->imp_lock);
- cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
/* Check if conn UUID does have this peer NID. */
if (class_check_uuid(&conn->oic_uuid, peer)) {
*uuid = conn->oic_uuid;
/* cl_dirty_max_pages may be changed at connect time in
* ptlrpc_connect_interpret(). */
client_adjust_max_dirty(cli);
- CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
- CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
- CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
- CFS_INIT_LIST_HEAD(&cli->cl_loi_write_list);
- CFS_INIT_LIST_HEAD(&cli->cl_loi_read_list);
+ INIT_LIST_HEAD(&cli->cl_cache_waiters);
+ INIT_LIST_HEAD(&cli->cl_loi_ready_list);
+ INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
+ INIT_LIST_HEAD(&cli->cl_loi_write_list);
+ INIT_LIST_HEAD(&cli->cl_loi_read_list);
client_obd_list_lock_init(&cli->cl_loi_list_lock);
atomic_set(&cli->cl_pending_w_pages, 0);
atomic_set(&cli->cl_pending_r_pages, 0);
spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
/* lru for osc. */
- CFS_INIT_LIST_HEAD(&cli->cl_lru_osc);
+ INIT_LIST_HEAD(&cli->cl_lru_osc);
atomic_set(&cli->cl_lru_shrinkers, 0);
atomic_set(&cli->cl_lru_busy, 0);
atomic_set(&cli->cl_lru_in_list, 0);
- CFS_INIT_LIST_HEAD(&cli->cl_lru_list);
+ INIT_LIST_HEAD(&cli->cl_lru_list);
client_obd_list_lock_init(&cli->cl_lru_list_lock);
atomic_set(&cli->cl_unstable_count, 0);
/* complete all outstanding replies */
spin_lock(&exp->exp_lock);
- while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
+ while (!list_empty(&exp->exp_outstanding_replies)) {
struct ptlrpc_reply_state *rs =
- cfs_list_entry(exp->exp_outstanding_replies.next,
+ list_entry(exp->exp_outstanding_replies.next,
struct ptlrpc_reply_state, rs_exp_list);
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
spin_lock(&svcpt->scp_rep_lock);
- cfs_list_del_init(&rs->rs_exp_list);
+ list_del_init(&rs->rs_exp_list);
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
spin_unlock(&rs->rs_lock);
spin_unlock(&export->exp_lock);
spin_lock(&target->obd_dev_lock);
- cfs_list_del_init(&export->exp_obd_chain_timed);
+ list_del_init(&export->exp_obd_chain_timed);
spin_unlock(&target->obd_dev_lock);
} else {
spin_unlock(&export->exp_lock);
if (export->exp_connection != NULL) {
/* Check to see if connection came from another NID. */
if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
- !cfs_hlist_unhashed(&export->exp_nid_hash))
+ !hlist_unhashed(&export->exp_nid_hash))
cfs_hash_del(export->exp_obd->obd_nid_hash,
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
export->exp_connection = ptlrpc_connection_get(req->rq_peer,
req->rq_self,
&remote_uuid);
- if (cfs_hlist_unhashed(&export->exp_nid_hash)) {
+ if (hlist_unhashed(&export->exp_nid_hash)) {
cfs_hash_add(export->exp_obd->obd_nid_hash,
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
static void target_request_copy_get(struct ptlrpc_request *req)
{
class_export_rpc_inc(req->rq_export);
- LASSERT(cfs_list_empty(&req->rq_list));
- CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+ LASSERT(list_empty(&req->rq_list));
+ INIT_LIST_HEAD(&req->rq_replay_list);
/* Increase refcount to keep request in queue. */
atomic_inc(&req->rq_refcount);
static void target_request_copy_put(struct ptlrpc_request *req)
{
- LASSERT(cfs_list_empty(&req->rq_replay_list));
+ LASSERT(list_empty(&req->rq_replay_list));
LASSERT_ATOMIC_POS(&req->rq_export->exp_replay_count);
atomic_dec(&req->rq_export->exp_replay_count);
LASSERT(exp);
spin_lock(&exp->exp_lock);
- cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
+ list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
rq_replay_list) {
if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
dup = 1;
CERROR("invalid flags %x of resent replay\n",
lustre_msg_get_flags(req->rq_reqmsg));
} else {
- cfs_list_add_tail(&req->rq_replay_list,
+ list_add_tail(&req->rq_replay_list,
&exp->exp_req_replay_queue);
}
static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
{
- LASSERT(!cfs_list_empty(&req->rq_replay_list));
+ LASSERT(!list_empty(&req->rq_replay_list));
LASSERT(req->rq_export);
spin_lock(&req->rq_export->exp_lock);
- cfs_list_del_init(&req->rq_replay_list);
+ list_del_init(&req->rq_replay_list);
spin_unlock(&req->rq_export->exp_lock);
}
ldlm_reprocess_all_ns(obd->obd_namespace);
spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
- !cfs_list_empty(&obd->obd_lock_replay_queue) ||
- !cfs_list_empty(&obd->obd_final_req_queue)) {
+ if (!list_empty(&obd->obd_req_replay_queue) ||
+ !list_empty(&obd->obd_lock_replay_queue) ||
+ !list_empty(&obd->obd_final_req_queue)) {
CERROR("%s: Recovery queues ( %s%s%s) are not empty\n",
obd->obd_name,
- cfs_list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
- cfs_list_empty(&obd->obd_lock_replay_queue) ? \
+ list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
+ list_empty(&obd->obd_lock_replay_queue) ?
"" : "lock ",
- cfs_list_empty(&obd->obd_final_req_queue) ? \
+ list_empty(&obd->obd_final_req_queue) ?
"" : "final ");
spin_unlock(&obd->obd_recovery_task_lock);
LBUG();
static void abort_req_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- cfs_list_t abort_list;
+ struct list_head abort_list;
- CFS_INIT_LIST_HEAD(&abort_list);
+ INIT_LIST_HEAD(&abort_list);
spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+ list_splice_init(&obd->obd_req_replay_queue, &abort_list);
spin_unlock(&obd->obd_recovery_task_lock);
- cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
+ list_for_each_entry_safe(req, n, &abort_list, rq_list) {
DEBUG_REQ(D_WARNING, req, "aborted:");
req->rq_status = -ENOTCONN;
if (ptlrpc_error(req)) {
static void abort_lock_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- cfs_list_t abort_list;
+ struct list_head abort_list;
- CFS_INIT_LIST_HEAD(&abort_list);
+ INIT_LIST_HEAD(&abort_list);
spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+ list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
spin_unlock(&obd->obd_recovery_task_lock);
- cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
+ list_for_each_entry_safe(req, n, &abort_list, rq_list) {
DEBUG_REQ(D_ERROR, req, "aborted:");
req->rq_status = -ENOTCONN;
if (ptlrpc_error(req)) {
void target_cleanup_recovery(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- cfs_list_t clean_list;
+ struct list_head clean_list;
ENTRY;
- CFS_INIT_LIST_HEAD(&clean_list);
+ INIT_LIST_HEAD(&clean_list);
spin_lock(&obd->obd_dev_lock);
if (!obd->obd_recovering) {
spin_unlock(&obd->obd_dev_lock);
spin_lock(&obd->obd_recovery_task_lock);
target_cancel_recovery_timer(obd);
- cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+ list_splice_init(&obd->obd_req_replay_queue, &clean_list);
spin_unlock(&obd->obd_recovery_task_lock);
- cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+ list_for_each_entry_safe(req, n, &clean_list, rq_list) {
LASSERT(req->rq_reply_state == 0);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
}
spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
- cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+ list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+ list_splice_init(&obd->obd_final_req_queue, &clean_list);
spin_unlock(&obd->obd_recovery_task_lock);
- cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
+ list_for_each_entry_safe(req, n, &clean_list, rq_list) {
LASSERT(req->rq_reply_state == 0);
target_request_copy_put(req);
}
ENTRY;
spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
- req = cfs_list_entry(obd->obd_req_replay_queue.next,
+ if (!list_empty(&obd->obd_req_replay_queue)) {
+ req = list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
req_transno = lustre_msg_get_transno(req->rq_reqmsg);
} else {
int wake_up = 0;
spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+ if (!list_empty(&obd->obd_lock_replay_queue)) {
CDEBUG(D_HA, "waking for next lock\n");
wake_up = 1;
} else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
}
spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
- req = cfs_list_entry(obd->obd_req_replay_queue.next,
+ if (!list_empty(&obd->obd_req_replay_queue)) {
+ req = list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
+ list_del_init(&req->rq_list);
obd->obd_requests_queued_for_recovery--;
spin_unlock(&obd->obd_recovery_task_lock);
} else {
spin_unlock(&obd->obd_recovery_task_lock);
- LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+ LASSERT(list_empty(&obd->obd_req_replay_queue));
LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
/** evict exports failed VBR */
class_disconnect_stale_exports(obd, exp_vbr_healthy);
abort_lock_replay_queue(obd);
spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
- req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+ if (!list_empty(&obd->obd_lock_replay_queue)) {
+ req = list_entry(obd->obd_lock_replay_queue.next,
struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
+ list_del_init(&req->rq_list);
spin_unlock(&obd->obd_recovery_task_lock);
} else {
spin_unlock(&obd->obd_recovery_task_lock);
- LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
+ LASSERT(list_empty(&obd->obd_lock_replay_queue));
LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
/** evict exports failed VBR */
class_disconnect_stale_exports(obd, exp_vbr_healthy);
struct ptlrpc_request *req = NULL;
spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_final_req_queue)) {
- req = cfs_list_entry(obd->obd_final_req_queue.next,
+ if (!list_empty(&obd->obd_final_req_queue)) {
+ req = list_entry(obd->obd_final_req_queue.next,
struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
+ list_del_init(&req->rq_list);
spin_unlock(&obd->obd_recovery_task_lock);
if (req->rq_export->exp_in_recovery) {
spin_lock(&req->rq_export->exp_lock);
int target_queue_recovery_request(struct ptlrpc_request *req,
struct obd_device *obd)
{
- cfs_list_t *tmp;
- int inserted = 0;
__u64 transno = lustre_msg_get_transno(req->rq_reqmsg);
- ENTRY;
+ struct ptlrpc_request *reqiter;
+ int inserted = 0;
+ ENTRY;
if (obd->obd_recovery_data.trd_processing_task == current_pid()) {
/* Processing the queue right now, don't re-add. */
wake_up(&obd->obd_next_transno_waitq);
spin_lock(&obd->obd_recovery_task_lock);
if (obd->obd_recovering) {
- cfs_list_add_tail(&req->rq_list,
+ list_add_tail(&req->rq_list,
&obd->obd_final_req_queue);
} else {
spin_unlock(&obd->obd_recovery_task_lock);
RETURN(-ENOTCONN);
}
LASSERT(req->rq_export->exp_lock_replay_needed);
- cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+ list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
spin_unlock(&obd->obd_recovery_task_lock);
RETURN(0);
}
* buffers (eg mdt_body, ost_body etc) have NOT been swabbed. */
if (!transno) {
- CFS_INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_list);
DEBUG_REQ(D_HA, req, "not queueing");
RETURN(1);
}
spin_lock(&obd->obd_recovery_task_lock);
if (transno < obd->obd_next_recovery_transno) {
/* Processing the queue right now, don't re-add. */
- LASSERT(cfs_list_empty(&req->rq_list));
+ LASSERT(list_empty(&req->rq_list));
spin_unlock(&obd->obd_recovery_task_lock);
RETURN(1);
}
RETURN(0);
}
- /* XXX O(n^2) */
+ /* XXX O(n^2) */
spin_lock(&obd->obd_recovery_task_lock);
- LASSERT(obd->obd_recovering);
- cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
- struct ptlrpc_request *reqiter =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
-
- if (lustre_msg_get_transno(reqiter->rq_reqmsg) > transno) {
- cfs_list_add_tail(&req->rq_list, &reqiter->rq_list);
- inserted = 1;
- break;
- }
+ LASSERT(obd->obd_recovering);
+ list_for_each_entry(reqiter, &obd->obd_req_replay_queue, rq_list) {
+ if (lustre_msg_get_transno(reqiter->rq_reqmsg) > transno) {
+ list_add_tail(&req->rq_list, &reqiter->rq_list);
+ inserted = 1;
+ goto added;
+ }
if (unlikely(lustre_msg_get_transno(reqiter->rq_reqmsg) ==
transno)) {
RETURN(0);
}
}
-
+added:
if (!inserted)
- cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
+ list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
obd->obd_requests_queued_for_recovery++;
spin_unlock(&obd->obd_recovery_task_lock);
}
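/*
 * The rewritten queueing code above is a sorted insert: replay requests
 * must stay in transno order, so the new request is linked in front of
 * the first queued request with a larger transno, and appended only when
 * no such request exists.  list_add_tail() on a member inserts *before*
 * that member, which is what makes the idiom work.  Its shape, with
 * hypothetical names:
 *
 *	list_for_each_entry(cur, &queue, link) {
 *		if (cur->key > new->key) {
 *			list_add_tail(&new->link, &cur->link);
 *			inserted = 1;
 *			break;
 *		}
 *	}
 *	if (!inserted)
 *		list_add_tail(&new->link, &queue);
 */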
/* must be an export if locks saved */
- LASSERT (req->rq_export != NULL);
+ LASSERT(req->rq_export != NULL);
/* req/reply consistent */
LASSERT(rs->rs_svcpt == svcpt);
/* "fresh" reply */
- LASSERT (!rs->rs_scheduled);
- LASSERT (!rs->rs_scheduled_ever);
- LASSERT (!rs->rs_handled);
- LASSERT (!rs->rs_on_net);
- LASSERT (rs->rs_export == NULL);
- LASSERT (cfs_list_empty(&rs->rs_obd_list));
- LASSERT (cfs_list_empty(&rs->rs_exp_list));
+ LASSERT(!rs->rs_scheduled);
+ LASSERT(!rs->rs_scheduled_ever);
+ LASSERT(!rs->rs_handled);
+ LASSERT(!rs->rs_on_net);
+ LASSERT(rs->rs_export == NULL);
+ LASSERT(list_empty(&rs->rs_obd_list));
+ LASSERT(list_empty(&rs->rs_exp_list));
- exp = class_export_get (req->rq_export);
+ exp = class_export_get(req->rq_export);
/* disable reply scheduling while I'm setting up */
rs->rs_scheduled = 1;
rs->rs_transno, exp->exp_last_committed);
if (rs->rs_transno > exp->exp_last_committed) {
/* not committed already */
- cfs_list_add_tail(&rs->rs_obd_list,
+ list_add_tail(&rs->rs_obd_list,
&exp->exp_uncommitted_replies);
}
spin_unlock(&exp->exp_uncommitted_replies_lock);
spin_lock(&exp->exp_lock);
- cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+ list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
spin_unlock(&exp->exp_lock);
netrc = target_send_reply_msg(req, rc, fail_id);
spin_lock(&rs->rs_lock);
if (rs->rs_transno <= exp->exp_last_committed ||
(!rs->rs_on_net && !rs->rs_no_ack) ||
- cfs_list_empty(&rs->rs_exp_list) || /* completed already */
- cfs_list_empty(&rs->rs_obd_list)) {
+ list_empty(&rs->rs_exp_list) || /* completed already */
+ list_empty(&rs->rs_obd_list)) {
CDEBUG(D_HA, "Schedule reply immediately\n");
ptlrpc_dispatch_difficult_reply(rs);
} else {
- cfs_list_add(&rs->rs_list, &svcpt->scp_rep_active);
+ list_add(&rs->rs_list, &svcpt->scp_rep_active);
rs->rs_scheduled = 0; /* allow notifier to schedule */
}
spin_unlock(&rs->rs_lock);
void ldlm_dump_export_locks(struct obd_export *exp)
{
spin_lock(&exp->exp_locks_list_guard);
- if (!cfs_list_empty(&exp->exp_locks_list)) {
+ if (!list_empty(&exp->exp_locks_list)) {
struct ldlm_lock *lock;
CERROR("dumping locks for export %p,"
"ignore if the unmount doesn't hang\n", exp);
- cfs_list_for_each_entry(lock, &exp->exp_locks_list,
+ list_for_each_entry(lock, &exp->exp_locks_list,
l_exp_refs_link)
LDLM_ERROR(lock, "lock:");
}
res = lock->l_resource;
LASSERT(ldlm_is_destroyed(lock));
- LASSERT(cfs_list_empty(&lock->l_res_link));
- LASSERT(cfs_list_empty(&lock->l_pending_chain));
+ LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_pending_chain));
lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
LDLM_NSS_LOCKS);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
- if (!cfs_list_empty(&lock->l_lru)) {
+ if (!list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- cfs_list_del_init(&lock->l_lru);
+ list_del_init(&lock->l_lru);
LASSERT(ns->ns_nr_unused > 0);
ns->ns_nr_unused--;
rc = 1;
ENTRY;
if (ldlm_is_ns_srv(lock)) {
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
RETURN(0);
}
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
lock->l_last_used = cfs_time_current();
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+ list_add_tail(&lock->l_lru, &ns->ns_unused_list);
ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
ENTRY;
if (ldlm_is_ns_srv(lock)) {
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
EXIT;
return;
}
spin_lock(&ns->ns_lock);
- if (!cfs_list_empty(&lock->l_lru)) {
+ if (!list_empty(&lock->l_lru)) {
ldlm_lock_remove_from_lru_nolock(lock);
ldlm_lock_add_to_lru_nolock(lock);
}
LBUG();
}
- if (!cfs_list_empty(&lock->l_res_link)) {
+ if (!list_empty(&lock->l_res_link)) {
LDLM_ERROR(lock, "lock still on resource");
LBUG();
}
if (ldlm_is_destroyed(lock)) {
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
EXIT;
return 0;
}
lu_ref_add(&resource->lr_reference, "lock", lock);
atomic_set(&lock->l_refc, 2);
- CFS_INIT_LIST_HEAD(&lock->l_res_link);
- CFS_INIT_LIST_HEAD(&lock->l_lru);
- CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
- CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
- CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
- CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
+ INIT_LIST_HEAD(&lock->l_res_link);
+ INIT_LIST_HEAD(&lock->l_lru);
+ INIT_LIST_HEAD(&lock->l_pending_chain);
+ INIT_LIST_HEAD(&lock->l_bl_ast);
+ INIT_LIST_HEAD(&lock->l_cp_ast);
+ INIT_LIST_HEAD(&lock->l_rk_ast);
init_waitqueue_head(&lock->l_waitq);
lock->l_blocking_lock = NULL;
- CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
- CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
- CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
- CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash);
+ INIT_LIST_HEAD(&lock->l_sl_mode);
+ INIT_LIST_HEAD(&lock->l_sl_policy);
+ INIT_HLIST_NODE(&lock->l_exp_hash);
+ INIT_HLIST_NODE(&lock->l_exp_flock_hash);
lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
LDLM_NSS_LOCKS);
- CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
+ INIT_LIST_HEAD(&lock->l_handle.h_link);
class_handle_hash(&lock->l_handle, &lock_handle_ops);
lu_ref_init(&lock->l_reference);
lock->l_callback_timeout = 0;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
- CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
+ INIT_LIST_HEAD(&lock->l_exp_refs_link);
lock->l_exp_refs_nr = 0;
lock->l_exp_refs_target = NULL;
#endif
- CFS_INIT_LIST_HEAD(&lock->l_exp_list);
+ INIT_LIST_HEAD(&lock->l_exp_list);
RETURN(lock);
}
LASSERT(new_resid->name[0] != 0);
/* This function assumes that the lock isn't on any lists */
- LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_res_link));
type = oldres->lr_type;
unlock_res_and_lock(lock);
* Only add if we have not sent a blocking AST to the lock yet.
*/
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
if (!ldlm_is_ast_sent(lock)) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
* discard dirty data, rather than writing back. */
if (ldlm_is_ast_discard_data(new))
ldlm_set_discard_data(lock);
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, work_list);
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
LASSERT(lock->l_blocking_lock == NULL);
lock->l_blocking_lock = LDLM_LOCK_GET(new);
/**
* Add a lock to list of just granted locks to send completion AST to.
*/
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
+void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
{
if (!ldlm_is_cp_reqd(lock)) {
ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
- LASSERT(cfs_list_empty(&lock->l_cp_ast));
- cfs_list_add(&lock->l_cp_ast, work_list);
+ LASSERT(list_empty(&lock->l_cp_ast));
+ list_add(&lock->l_cp_ast, work_list);
LDLM_LOCK_GET(lock);
}
}
* Must be called with lr_lock held.
*/
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
ENTRY;
check_res_locked(lock->l_resource);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
struct sl_insert_point {
- cfs_list_t *res_link;
- cfs_list_t *mode_link;
- cfs_list_t *policy_link;
+ struct list_head *res_link;
+ struct list_head *mode_link;
+ struct list_head *policy_link;
};
/**
* NOTE: called by
* - ldlm_grant_lock_with_skiplist
*/
-static void search_granted_lock(cfs_list_t *queue,
+static void search_granted_lock(struct list_head *queue,
struct ldlm_lock *req,
struct sl_insert_point *prev)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
ENTRY;
- cfs_list_for_each(tmp, queue) {
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each(tmp, queue) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
- mode_end = cfs_list_entry(lock->l_sl_mode.prev,
+ mode_end = list_entry(lock->l_sl_mode.prev,
struct ldlm_lock, l_sl_mode);
if (lock->l_req_mode != req->l_req_mode) {
} else if (lock->l_resource->lr_type == LDLM_IBITS) {
for (;;) {
policy_end =
- cfs_list_entry(lock->l_sl_policy.prev,
+ list_entry(lock->l_sl_policy.prev,
struct ldlm_lock,
l_sl_policy);
/* go to next policy group within mode group */
tmp = policy_end->l_res_link.next;
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
} /* loop over policy groups within the mode group */
return;
}
- LASSERT(cfs_list_empty(&lock->l_res_link));
- LASSERT(cfs_list_empty(&lock->l_sl_mode));
- LASSERT(cfs_list_empty(&lock->l_sl_policy));
+ LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_sl_mode));
+ LASSERT(list_empty(&lock->l_sl_policy));
/*
* lock->link == prev->link means lock is first starting the group.
* Don't re-add to itself to suppress kernel warnings.
*/
if (&lock->l_res_link != prev->res_link)
- cfs_list_add(&lock->l_res_link, prev->res_link);
+ list_add(&lock->l_res_link, prev->res_link);
if (&lock->l_sl_mode != prev->mode_link)
- cfs_list_add(&lock->l_sl_mode, prev->mode_link);
+ list_add(&lock->l_sl_mode, prev->mode_link);
if (&lock->l_sl_policy != prev->policy_link)
- cfs_list_add(&lock->l_sl_policy, prev->policy_link);
+ list_add(&lock->l_sl_policy, prev->policy_link);
EXIT;
}
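/*
 * The guards above depend on list_add(entry, prev) inserting entry
 * immediately after prev, so one struct sl_insert_point can splice the
 * lock into the resource queue, the mode skiplist and the policy skiplist
 * in a single pass; inserting a node after itself would link it to itself
 * and corrupt the list.  For reference (a, b, c are bare struct list_head
 * nodes in this sketch):
 *
 *	LIST_HEAD(q);
 *	list_add(&a, &q);	// q -> a
 *	list_add(&b, &q);	// q -> b -> a   (insert after head)
 *	list_add_tail(&c, &q);	// q -> b -> a -> c   (insert before head)
 */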
*
* must be called with lr_lock held
*/
-void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
ENTRY;
* \retval a referenced lock or NULL. See the flag descriptions below, in the
* comment above ldlm_lock_match
*/
-static struct ldlm_lock *search_queue(cfs_list_t *queue,
+static struct ldlm_lock *search_queue(struct list_head *queue,
ldlm_mode_t *mode,
ldlm_policy_data_t *policy,
struct ldlm_lock *old_lock,
__u64 flags, int unref)
{
struct ldlm_lock *lock;
- cfs_list_t *tmp;
+ struct list_head *tmp;
- cfs_list_for_each(tmp, queue) {
+ list_for_each(tmp, queue) {
ldlm_mode_t match;
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (lock == old_lock)
break;
GOTO(out, rc = -ENOMEM);
}
- CFS_INIT_LIST_HEAD(&node->li_group);
+ INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
node = NULL;
}
*
* Must be called with resource lock held.
*/
-int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
- cfs_list_t *work_list)
+int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
+ struct list_head *work_list)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
ldlm_processing_policy policy;
__u64 flags;
int rc = LDLM_ITER_CONTINUE;
policy = ldlm_processing_policy_table[res->lr_type];
LASSERT(policy);
- cfs_list_for_each_safe(tmp, pos, queue) {
+ list_for_each_safe(tmp, pos, queue) {
struct ldlm_lock *pending;
- pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ pending = list_entry(tmp, struct ldlm_lock, l_res_link);
CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
struct ldlm_lock *lock;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
/* nobody should touch l_bl_ast */
lock_res_and_lock(lock);
- cfs_list_del_init(&lock->l_bl_ast);
+ list_del_init(&lock->l_bl_ast);
LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
ldlm_completion_callback completion_callback;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
/* It's possible to receive a completion AST before we've set
* the l_completion_ast pointer: either because the AST arrived
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
- cfs_list_del_init(&lock->l_cp_ast);
+ list_del_init(&lock->l_cp_ast);
LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225 */
struct ldlm_lock *lock;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
- cfs_list_del_init(&lock->l_rk_ast);
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
+ list_del_init(&lock->l_rk_ast);
/* the desc just pretend to exclusive */
ldlm_lock2desc(lock, &desc);
int rc = 0;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- gl_work = cfs_list_entry(arg->list->next, struct ldlm_glimpse_work,
+ gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
gl_list);
- cfs_list_del_init(&gl_work->gl_list);
+ list_del_init(&gl_work->gl_list);
lock = gl_work->gl_lock;
* Used on server to send multiple ASTs together instead of sending one by
* one.
*/
-int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
ldlm_desc_ast_t ast_type)
{
struct ldlm_cb_set_arg *arg;
set_producer_func work_ast_lock;
int rc;
- if (cfs_list_empty(rpc_list))
+ if (list_empty(rpc_list))
RETURN(0);
OBD_ALLOC_PTR(arg);
}
static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
int rc;
*/
void ldlm_reprocess_all(struct ldlm_resource *res)
{
- CFS_LIST_HEAD(rpc_list);
-
+ struct list_head rpc_list;
#ifdef HAVE_SERVER_SUPPORT
int rc;
ENTRY;
+
+ INIT_LIST_HEAD(&rpc_list);
/* Local lock trees don't get reprocessed. */
if (ns_is_client(ldlm_res_to_ns(res))) {
EXIT;
rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
LDLM_WORK_CP_AST);
if (rc == -ERESTART) {
- LASSERT(cfs_list_empty(&rpc_list));
+ LASSERT(list_empty(&rpc_list));
goto restart;
}
#else
ENTRY;
+
+ INIT_LIST_HEAD(&rpc_list);
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
req->l_resource->lr_type != LDLM_IBITS)
return;
- cfs_list_del_init(&req->l_sl_policy);
- cfs_list_del_init(&req->l_sl_mode);
+ list_del_init(&req->l_sl_policy);
+ list_del_init(&req->l_sl_mode);
}
/**
* Cancels passed locks.
*/
int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
struct export_cl_data *ecl = (struct export_cl_data *)data;
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
__u32 *flags)
{
- CFS_LIST_HEAD(rpc_list);
+ struct list_head rpc_list;
struct ldlm_resource *res;
struct ldlm_namespace *ns;
int granted = 0;
struct ldlm_interval *node;
ENTRY;
+ INIT_LIST_HEAD(&rpc_list);
/* Just return if mode is unchanged. */
if (new_mode == lock->l_granted_mode) {
*flags |= LDLM_FL_BLOCK_GRANTED;
/* FIXME: ugly code, I have to attach the lock to a
* interval node again since perhaps it will be granted
* soon */
- CFS_INIT_LIST_HEAD(&node->li_group);
+ INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
node = NULL;
}
* as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
* see bug 13843
*/
- cfs_list_t blp_prio_list;
+ struct list_head blp_prio_list;
/*
* blp_list is used for all other callbacks which are likely
* to take longer to process.
*/
- cfs_list_t blp_list;
+ struct list_head blp_list;
wait_queue_head_t blp_waitq;
struct completion blp_comp;
};
struct ldlm_bl_work_item {
- cfs_list_t blwi_entry;
+ struct list_head blwi_entry;
struct ldlm_namespace *blwi_ns;
struct ldlm_lock_desc blwi_ld;
struct ldlm_lock *blwi_lock;
- cfs_list_t blwi_head;
+ struct list_head blwi_head;
int blwi_count;
struct completion blwi_comp;
ldlm_cancel_flags_t blwi_flags;
*
* All access to it should be under waiting_locks_spinlock.
*/
-static cfs_list_t waiting_locks_list;
+static struct list_head waiting_locks_list;
static struct timer_list waiting_locks_timer;
static struct expired_lock_thread {
wait_queue_head_t elt_waitq;
int elt_state;
int elt_dump;
- cfs_list_t elt_expired_locks;
+ struct list_head elt_expired_locks;
} expired_lock_thread;
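/*
 * waiting_locks_list and expired_lock_thread form a producer/consumer
 * pair: the waiting-locks timer moves timed-out locks onto
 * elt_expired_locks under waiting_locks_spinlock and wakes elt_waitq,
 * while expired_lock_main() drains the list head-first.  Skeleton of the
 * consumer loop as used below, under the same spinlock:
 *
 *	while (!list_empty(expired)) {
 *		lock = list_entry(expired->next, struct ldlm_lock,
 *				  l_pending_chain);
 *		list_del_init(&lock->l_pending_chain);
 *		// ... evict the export holding the expired lock ...
 *	}
 */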
static inline int have_expired_locks(void)
ENTRY;
spin_lock_bh(&waiting_locks_spinlock);
- need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
+ need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
spin_unlock_bh(&waiting_locks_spinlock);
RETURN(need_to_run);
*/
static int expired_lock_main(void *arg)
{
- cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
+ struct list_head *expired = &expired_lock_thread.elt_expired_locks;
struct l_wait_info lwi = { 0 };
int do_dump;
do_dump = 0;
- while (!cfs_list_empty(expired)) {
+ while (!list_empty(expired)) {
struct obd_export *export;
struct ldlm_lock *lock;
- lock = cfs_list_entry(expired->next, struct ldlm_lock,
+ lock = list_entry(expired->next, struct ldlm_lock,
l_pending_chain);
if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE &&
(void *)lock >= LP_POISON) {
CERROR("free lock on elt list %p\n", lock);
LBUG();
}
- cfs_list_del_init(&lock->l_pending_chain);
+ list_del_init(&lock->l_pending_chain);
if ((void *)lock->l_export <
LP_POISON + PAGE_CACHE_SIZE &&
(void *)lock->l_export >= LP_POISON) {
return 0;
spin_lock_bh(&lock->l_export->exp_rpc_lock);
- cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
+ list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
rq_exp_list) {
if (req->rq_ops->hpreq_lock_match) {
match = req->rq_ops->hpreq_lock_match(req, lock);
int need_dump = 0;
spin_lock_bh(&waiting_locks_spinlock);
- while (!cfs_list_empty(&waiting_locks_list)) {
- lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+ while (!list_empty(&waiting_locks_list)) {
+ lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
l_pending_chain);
if (cfs_time_after(lock->l_callback_timeout,
cfs_time_current()) ||
/* no needs to take an extra ref on the lock since it was in
* the waiting_locks_list and ldlm_add_waiting_lock()
* already grabbed a ref */
- cfs_list_del(&lock->l_pending_chain);
- cfs_list_add(&lock->l_pending_chain,
+ list_del(&lock->l_pending_chain);
+ list_add(&lock->l_pending_chain,
&expired_lock_thread.elt_expired_locks);
need_dump = 1;
}
- if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
+ if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
if (obd_dump_on_timeout && need_dump)
expired_lock_thread.elt_dump = __LINE__;
* Make sure the timer will fire again if we have any locks
* left.
*/
- if (!cfs_list_empty(&waiting_locks_list)) {
+ if (!list_empty(&waiting_locks_list)) {
cfs_time_t timeout_rounded;
- lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+ lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
l_pending_chain);
timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
cfs_time_t timeout;
cfs_time_t timeout_rounded;
- if (!cfs_list_empty(&lock->l_pending_chain))
+ if (!list_empty(&lock->l_pending_chain))
return 0;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
/* if the new lock has a shorter timeout than something earlier on
the list, we'll wait the longer amount of time; no big deal. */
/* FIFO */
- cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
+ list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
return 1;
}
if (ret) {
spin_lock_bh(&lock->l_export->exp_bl_list_lock);
- if (cfs_list_empty(&lock->l_exp_list))
- cfs_list_add(&lock->l_exp_list,
+ if (list_empty(&lock->l_exp_list))
+ list_add(&lock->l_exp_list,
&lock->l_export->exp_bl_list);
spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
}
*/
static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
- cfs_list_t *list_next;
+ struct list_head *list_next;
- if (cfs_list_empty(&lock->l_pending_chain))
+ if (list_empty(&lock->l_pending_chain))
return 0;
list_next = lock->l_pending_chain.next;
cfs_timer_disarm(&waiting_locks_timer);
} else {
struct ldlm_lock *next;
- next = cfs_list_entry(list_next, struct ldlm_lock,
+ next = list_entry(list_next, struct ldlm_lock,
l_pending_chain);
cfs_timer_arm(&waiting_locks_timer,
round_timeout(next->l_callback_timeout));
}
}
- cfs_list_del_init(&lock->l_pending_chain);
+ list_del_init(&lock->l_pending_chain);
return 1;
}
/* remove the lock out of export blocking list */
spin_lock_bh(&lock->l_export->exp_bl_list_lock);
- cfs_list_del_init(&lock->l_exp_list);
+ list_del_init(&lock->l_exp_list);
spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
if (ret) {
spin_lock_bh(&waiting_locks_spinlock);
- if (cfs_list_empty(&lock->l_pending_chain)) {
+ if (list_empty(&lock->l_pending_chain)) {
spin_unlock_bh(&waiting_locks_spinlock);
LDLM_DEBUG(lock, "wasn't waiting");
return 0;
/* the lock was not in any list, grab an extra ref before adding
* the lock to the expired list */
LDLM_LOCK_GET(lock);
- cfs_list_add(&lock->l_pending_chain,
+ list_add(&lock->l_pending_chain,
&expired_lock_thread.elt_expired_locks);
wake_up(&expired_lock_thread.elt_waitq);
spin_unlock_bh(&waiting_locks_spinlock);
}
spin_lock_bh(&lock->l_export->exp_rpc_lock);
- cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
- rq_exp_list) {
+ list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
+ rq_exp_list) {
/* Do not process requests that were not yet added to there
* incoming queue or were already removed from there for
* processing. We evaluate ptlrpc_nrs_req_can_move() without
}
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
-int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list)
+int ldlm_glimpse_locks(struct ldlm_resource *res,
+ struct list_head *gl_work_list)
{
int rc;
ENTRY;
struct ldlm_request *dlm_req,
struct ldlm_lock *lock)
{
+ struct list_head ast_list;
int lvb_len;
- CFS_LIST_HEAD(ast_list);
int rc = 0;
ENTRY;
LDLM_DEBUG(lock, "client completion callback handler START");
+ INIT_LIST_HEAD(&ast_list);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
while (to > 0) {
if (blwi->blwi_lock &&
ldlm_is_discard_data(blwi->blwi_lock)) {
/* add LDLM_FL_DISCARD_DATA requests to the priority list */
- cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+ list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
} else {
/* other blocking callbacks are added to the regular list */
- cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+ list_add_tail(&blwi->blwi_entry, &blp->blp_list);
}
spin_unlock(&blp->blp_lock);
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
- cfs_list_t *cancels, int count,
+ struct list_head *cancels, int count,
struct ldlm_lock *lock,
ldlm_cancel_flags_t cancel_flags)
{
init_completion(&blwi->blwi_comp);
- CFS_INIT_LIST_HEAD(&blwi->blwi_head);
+ INIT_LIST_HEAD(&blwi->blwi_head);
if (memory_pressure_get())
blwi->blwi_mem_pressure = 1;
if (ld != NULL)
blwi->blwi_ld = *ld;
if (count) {
- cfs_list_add(&blwi->blwi_head, cancels);
- cfs_list_del_init(cancels);
+ list_add(&blwi->blwi_head, cancels);
+ list_del_init(cancels);
blwi->blwi_count = count;
} else {
blwi->blwi_lock = lock;
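
The two calls init_blwi() makes in the count path transfer the entire cancels list in O(1): list_add() threads blwi_head into the ring just after the old head, and list_del_init() then unlinks the old head, leaving it empty for the caller. A self-contained sketch of that splice, assuming <linux/list.h> and a hypothetical work-item struct:

#include <linux/list.h>

struct work_item {
	struct list_head head;	/* becomes the new list head */
};

/* Steal every entry from @donor onto @wi->head in O(1), leaving
 * @donor empty -- the idiom used by init_blwi() above.  For a
 * freshly initialized @wi->head this is equivalent to
 * list_splice_init(donor, &wi->head). */
static void steal_list(struct work_item *wi, struct list_head *donor)
{
	list_add(&wi->head, donor);	/* new head joins the ring */
	list_del_init(donor);		/* old head drops out, reset empty */
}
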
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
struct ldlm_lock *lock,
- cfs_list_t *cancels, int count,
+ struct list_head *cancels, int count,
ldlm_cancel_flags_t cancel_flags)
{
ENTRY;
}
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- cfs_list_t *cancels, int count,
+ struct list_head *cancels, int count,
ldlm_cancel_flags_t cancel_flags)
{
#ifdef __KERNEL__
}
int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
- cfs_list_t *rpc_list = data;
+ struct list_head *rpc_list = data;
struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
lock_res_and_lock(lock);
&lock->l_remote_handle, &lock->l_exp_hash);
}
- cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
+ list_add_tail(&lock->l_rk_ast, rpc_list);
LDLM_LOCK_GET(lock);
unlock_res_and_lock(lock);
void ldlm_revoke_export_locks(struct obd_export *exp)
{
- cfs_list_t rpc_list;
+ struct list_head rpc_list;
ENTRY;
- CFS_INIT_LIST_HEAD(&rpc_list);
+ INIT_LIST_HEAD(&rpc_list);
cfs_hash_for_each_empty(exp->exp_lock_hash,
ldlm_revoke_lock_cb, &rpc_list);
ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
spin_lock(&blp->blp_lock);
/* process a request from the blp_list at least once every
 * blp_num_threads dequeues, so the priority list cannot starve it */
- if (!cfs_list_empty(&blp->blp_list) &&
- (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
- blwi = cfs_list_entry(blp->blp_list.next,
- struct ldlm_bl_work_item, blwi_entry);
+ if (!list_empty(&blp->blp_list) &&
+ (list_empty(&blp->blp_prio_list) || num_bl == 0))
+ blwi = list_entry(blp->blp_list.next,
+ struct ldlm_bl_work_item, blwi_entry);
else
- if (!cfs_list_empty(&blp->blp_prio_list))
- blwi = cfs_list_entry(blp->blp_prio_list.next,
- struct ldlm_bl_work_item,
- blwi_entry);
+ if (!list_empty(&blp->blp_prio_list))
+ blwi = list_entry(blp->blp_prio_list.next,
+ struct ldlm_bl_work_item,
+ blwi_entry);
if (blwi) {
if (++num_bl >= atomic_read(&blp->blp_num_threads))
num_bl = 0;
- cfs_list_del(&blwi->blwi_entry);
+ list_del(&blwi->blwi_entry);
}
spin_unlock(&blp->blp_lock);
}
static void *
-ldlm_export_lock_key(cfs_hlist_node_t *hnode)
+ldlm_export_lock_key(struct hlist_node *hnode)
{
struct ldlm_lock *lock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
return &lock->l_remote_handle;
}
static void
-ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
+ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
{
struct ldlm_lock *lock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
lock->l_remote_handle = *(struct lustre_handle *)key;
}
static int
-ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
{
return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
}
static void *
-ldlm_export_lock_object(cfs_hlist_node_t *hnode)
+ldlm_export_lock_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
}
static void
-ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
LDLM_LOCK_GET(lock);
}
static void
-ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
LDLM_LOCK_RELEASE(lock);
}
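
All six ldlm_export_lock_* handlers above reduce to the same pointer arithmetic: hlist_entry() is container_of(), recovering the enclosing ldlm_lock from its embedded l_exp_hash node. A sketch of the equivalence with a hypothetical struct:

#include <linux/list.h>

struct obj {
	int id;
	struct hlist_node hash;		/* embedded hash linkage */
};

static struct obj *obj_from_hnode(struct hlist_node *hnode)
{
	/* Both forms subtract offsetof(struct obj, hash) from hnode
	 * to recover the enclosing object:
	 *   container_of(hnode, struct obj, hash)   */
	return hlist_entry(hnode, struct obj, hash);
}
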
ldlm_state->ldlm_bl_pool = blp;
spin_lock_init(&blp->blp_lock);
- CFS_INIT_LIST_HEAD(&blp->blp_list);
- CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
+ INIT_LIST_HEAD(&blp->blp_list);
+ INIT_LIST_HEAD(&blp->blp_prio_list);
init_waitqueue_head(&blp->blp_waitq);
atomic_set(&blp->blp_num_threads, 0);
atomic_set(&blp->blp_busy_threads, 0);
}
# ifdef HAVE_SERVER_SUPPORT
- CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
+ INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
expired_lock_thread.elt_state = ELT_STOPPED;
init_waitqueue_head(&expired_lock_thread.elt_waitq);
- CFS_INIT_LIST_HEAD(&waiting_locks_list);
+ INIT_LIST_HEAD(&waiting_locks_list);
spin_lock_init(&waiting_locks_spinlock);
cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
{
ENTRY;
- if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
- !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
+ if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+ !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
CERROR("ldlm still has namespaces; clean these up first.\n");
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
init_completion(&blp->blp_comp);
spin_lock(&blp->blp_lock);
- cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
+ list_add_tail(&blwi.blwi_entry, &blp->blp_list);
wake_up(&blp->blp_waitq);
spin_unlock(&blp->blp_lock);
mutex_init(&ldlm_ref_mutex);
mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+
+ INIT_LIST_HEAD(&ldlm_srv_namespace_list);
+ INIT_LIST_HEAD(&ldlm_cli_active_namespace_list);
+ INIT_LIST_HEAD(&ldlm_cli_inactive_namespace_list);
+
ldlm_resource_slab = kmem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
SLAB_HWCACHE_ALIGN, NULL);
* \retval 1 if the lock is compatible to all locks in \a queue
*/
static inline int
-ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
- cfs_list_t *work_list)
+ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ struct list_head *work_list)
{
- cfs_list_t *tmp;
- struct ldlm_lock *lock;
- ldlm_mode_t req_mode = req->l_req_mode;
- int compat = 1;
- ENTRY;
+ ldlm_mode_t req_mode = req->l_req_mode;
+ struct ldlm_lock *lock;
+ struct list_head *tmp;
+ int compat = 1;
+ ENTRY;
- lockmode_verify(req_mode);
-
- cfs_list_for_each(tmp, queue) {
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ lockmode_verify(req_mode);
+
+ list_for_each(tmp, queue) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
/* We stop walking the queue if we hit ourselves so we don't
* take conflicting locks enqueued after us into account,
* or we'd wait forever. */
RETURN(compat);
/* Advance loop cursor to last lock of mode group. */
- tmp = &cfs_list_entry(lock->l_sl_mode.prev,
- struct ldlm_lock,
- l_sl_mode)->l_res_link;
+ tmp = &list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
+ l_sl_mode)->l_res_link;
if (lockmode_compat(lock->l_req_mode, req_mode))
continue;
ldlm_add_ast_work_item(lock, req, work_list);
{
- cfs_list_t *head;
+ struct list_head *head;
head = &lock->l_sl_mode;
- cfs_list_for_each_entry(lock, head, l_sl_mode)
+ list_for_each_entry(lock, head, l_sl_mode)
if (lock->l_blocking_ast)
ldlm_add_ast_work_item(lock, req,
work_list);
*/
int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
- struct ldlm_resource *res = lock->l_resource;
- CFS_LIST_HEAD(rpc_list);
- int rc;
- ENTRY;
+ struct ldlm_resource *res = lock->l_resource;
+ struct list_head rpc_list;
+ int rc;
+ ENTRY;
LASSERT(lock->l_granted_mode != lock->l_req_mode);
- check_res_locked(res);
- LASSERT(cfs_list_empty(&res->lr_converting));
+ check_res_locked(res);
+ LASSERT(list_empty(&res->lr_converting));
+ INIT_LIST_HEAD(&rpc_list);
if (!first_enq) {
LASSERT(work_list != NULL);
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (cfs_list_empty(&lock->l_res_link))
+ if (list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
int nr, equal = 0;
int time = 50; /* seconds of sleep if no active namespaces */
- /*
- * No need to setup pool limit for client pools.
- */
- if (client == LDLM_NAMESPACE_SERVER) {
- /*
- * Check all modest namespaces first.
- */
+ /*
+ * No need to setup pool limit for client pools.
+ */
+ if (client == LDLM_NAMESPACE_SERVER) {
+ /*
+ * Check all modest namespaces first.
+ */
mutex_lock(ldlm_namespace_lock(client));
- cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
- {
- if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
- continue;
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
+ continue;
l = ldlm_pool_granted(&ns->ns_pool);
if (l == 0)
equal = 1;
}
- /*
- * The rest is given to greedy namespaces.
- */
- cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
- {
- if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
- continue;
+ /*
+ * The rest is given to greedy namespaces.
+ */
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
+ continue;
if (equal) {
/*
* locks synchronously.
*/
mutex_lock(ldlm_namespace_lock(client));
- if (cfs_list_empty(ldlm_namespace_list(client))) {
+ if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
break;
}
EXPORT_SYMBOL(ldlm_prep_elc_req);
int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
- cfs_list_t *cancels, int count)
+ struct list_head *cancels, int count)
{
return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
* Pack \a count locks in \a head into ldlm_request buffer of request \a req.
*/
static void ldlm_cancel_pack(struct ptlrpc_request *req,
- cfs_list_t *head, int count)
+ struct list_head *head, int count)
{
struct ldlm_request *dlm;
struct ldlm_lock *lock;
/* XXX: it would be better to pack lock handles grouped by resource,
 * so that the server cancel would call filter_lvbo_update() less
* frequently. */
- cfs_list_for_each_entry(lock, head, l_bl_ast) {
+ list_for_each_entry(lock, head, l_bl_ast) {
if (!count--)
break;
LASSERT(lock->l_conn_export);
/**
* Prepare and send a batched cancel RPC. It will include \a count lock
* handles of locks given in \a cancels list. */
-int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
int count, ldlm_cancel_flags_t flags)
{
struct ptlrpc_request *req = NULL;
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* RPC which goes to canceld portal, so we can cancel other LRU locks
* here and send them all as one LDLM_CANCEL RPC. */
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, &cancels);
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, &cancels);
exp = lock->l_conn_export;
if (exp_connect_cancelset(exp)) {
* Locally cancel up to \a count locks in list \a cancels.
* Return the number of cancelled locks.
*/
-int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
ldlm_cancel_flags_t flags)
{
struct list_head head = LIST_HEAD_INIT(head);
__u64 rc;
left = count;
- cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+ list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
if (left-- == 0)
break;
* the one being generated now. */
if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
- cfs_list_del_init(&lock->l_bl_ast);
- cfs_list_add(&lock->l_bl_ast, &head);
+ list_del_init(&lock->l_bl_ast);
+ list_add(&lock->l_bl_ast, &head);
bl_ast++;
continue;
}
if (rc == LDLM_FL_LOCAL_ONLY) {
/* CANCEL RPC should not be sent to server. */
- cfs_list_del_init(&lock->l_bl_ast);
+ list_del_init(&lock->l_bl_ast);
LDLM_LOCK_RELEASE(lock);
count--;
}
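
ldlm_cli_cancel_list_local() walks cancels with the _safe variant because both branches above unlink the current entry (moving it to head, or dropping it for LOCAL_ONLY); the _safe iterator caches ->next before the body runs, so the deletion cannot break the traversal. A minimal sketch of the rule, assuming <linux/list.h> and a hypothetical node type:

#include <linux/list.h>

struct node {
	struct list_head link;
	int local_only;
};

/* Unlinking the current entry would invalidate a plain
 * list_for_each_entry() cursor; the _safe form pre-fetches the
 * next entry first, exactly as the loop above does. */
static void filter(struct list_head *cancels, struct list_head *head)
{
	struct node *n, *next;

	list_for_each_entry_safe(n, next, cancels, link) {
		if (n->local_only)
			list_del_init(&n->link);	/* drop in place */
		else
			list_move(&n->link, head);	/* re-home */
	}
}
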
* sending any RPCs or waiting for any
* outstanding RPC to complete.
*/
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
- int count, int max, int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
+ struct list_head *cancels, int count, int max,
+ int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
pf = ldlm_cancel_lru_policy(ns, flags);
LASSERT(pf != NULL);
- while (!cfs_list_empty(&ns->ns_unused_list)) {
+ while (!list_empty(&ns->ns_unused_list)) {
ldlm_policy_res_t result;
/* all unused locks */
if (max && added >= max)
break;
- cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+ list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
LASSERT(!ldlm_is_bl_ast(lock));
* and can't use l_pending_chain, as it is used on both the
* server and the client (nevertheless, bug 5666 claims it is
* used only on the server). */
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, cancels);
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, current);
spin_lock(&ns->ns_lock);
RETURN(added);
}
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
int count, int max, ldlm_cancel_flags_t cancel_flags,
int flags)
{
* list.
*/
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- cfs_list_t *cancels,
+ struct list_head *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, __u64 lock_flags,
ldlm_cancel_flags_t cancel_flags, void *opaque)
ENTRY;
lock_res(res);
- cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+ list_for_each_entry(lock, &res->lr_granted, l_res_link) {
if (opaque != NULL && lock->l_ast_data != opaque) {
LDLM_ERROR(lock, "data %p doesn't match opaque %p",
lock->l_ast_data, opaque);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
lock_flags;
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, cancels);
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, cancels);
LDLM_LOCK_GET(lock);
count++;
}
* buffer at the offset \a off.
* Destroy \a cancels at the end.
*/
-int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list(struct list_head *cancels, int count,
struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
{
struct ldlm_lock *lock;
int res = 0;
ENTRY;
- if (cfs_list_empty(cancels) || count == 0)
+ if (list_empty(cancels) || count == 0)
RETURN(0);
/* XXX: requests (both batched and not) could be sent in parallel.
* It would also speed up the case when the server does not support
* the feature. */
while (count > 0) {
- LASSERT(!cfs_list_empty(cancels));
- lock = cfs_list_entry(cancels->next, struct ldlm_lock,
+ LASSERT(!list_empty(cancels));
+ lock = list_entry(cancels->next, struct ldlm_lock,
l_bl_ast);
LASSERT(lock->l_conn_export);
};
static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
struct ldlm_cli_cancel_arg *lc = arg;
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
void *closure)
{
- cfs_list_t *tmp, *next;
+ struct list_head *tmp, *next;
struct ldlm_lock *lock;
int rc = LDLM_ITER_CONTINUE;
RETURN(LDLM_ITER_CONTINUE);
lock_res(res);
- cfs_list_for_each_safe(tmp, next, &res->lr_granted) {
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each_safe(tmp, next, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
- cfs_list_for_each_safe(tmp, next, &res->lr_converting) {
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each_safe(tmp, next, &res->lr_converting) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
- cfs_list_for_each_safe(tmp, next, &res->lr_waiting) {
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each_safe(tmp, next, &res->lr_waiting) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
- cfs_list_t *list = closure;
+ struct list_head *list = closure;
/* we use l_pending_chain here, because it's unused on clients. */
- LASSERTF(cfs_list_empty(&lock->l_pending_chain),
+ LASSERTF(list_empty(&lock->l_pending_chain),
"lock %p next %p prev %p\n",
lock, &lock->l_pending_chain.next, &lock->l_pending_chain.prev);
/* bug 9573: don't replay locks left after eviction, or
* on a lock so that it does not disappear under us (e.g. due to cancel)
*/
if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
- cfs_list_add(&lock->l_pending_chain, list);
+ list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
else if (lock->l_granted_mode)
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
- else if (!cfs_list_empty(&lock->l_res_link))
+ else if (!list_empty(&lock->l_res_link))
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
else
flags = LDLM_FL_REPLAY;
ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
- cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- cfs_list_del_init(&lock->l_pending_chain);
+ list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+ list_del_init(&lock->l_pending_chain);
if (rc) {
LDLM_LOCK_RELEASE(lock);
continue; /* or try to do the rest? */
int ldlm_cli_namespace_nr = 0;
struct mutex ldlm_srv_namespace_lock;
-CFS_LIST_HEAD(ldlm_srv_namespace_list);
+struct list_head ldlm_srv_namespace_list;
struct mutex ldlm_cli_namespace_lock;
/* Client Namespaces that have active resources in them.
* Once all resources go away, ldlm_poold moves such namespaces to the
* inactive list */
-CFS_LIST_HEAD(ldlm_cli_active_namespace_list);
+struct list_head ldlm_cli_active_namespace_list;
/* Client namespaces that don't have any locks in them */
-CFS_LIST_HEAD(ldlm_cli_inactive_namespace_list);
+struct list_head ldlm_cli_inactive_namespace_list;
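
Dropping CFS_LIST_HEAD() turns these three globals from compile-time-initialized heads into plain definitions, which is exactly why the matching INIT_LIST_HEAD() calls were added to the init path earlier in this patch. A sketch of the two forms (names hypothetical):

#include <linux/init.h>
#include <linux/list.h>

/* Compile-time initialization: the head is born self-linked. */
static LIST_HEAD(static_list);

/* Plain definition, as for ldlm_srv_namespace_list above: the head
 * stays zeroed until INIT_LIST_HEAD() runs, so module init must
 * initialize it before any list_add()/list_empty(). */
static struct list_head runtime_list;

static int __init demo_init(void)
{
	INIT_LIST_HEAD(&runtime_list);
	return 0;
}
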
struct proc_dir_entry *ldlm_type_proc_dir = NULL;
struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
return hash & mask;
}
-static void *ldlm_res_hop_key(cfs_hlist_node_t *hnode)
+static void *ldlm_res_hop_key(struct hlist_node *hnode)
{
struct ldlm_resource *res;
- res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
return &res->lr_name;
}
-static int ldlm_res_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
{
struct ldlm_resource *res;
- res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
return ldlm_res_eq((const struct ldlm_res_id *)key,
(const struct ldlm_res_id *)&res->lr_name);
}
-static void *ldlm_res_hop_object(cfs_hlist_node_t *hnode)
+static void *ldlm_res_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ return hlist_entry(hnode, struct ldlm_resource, lr_hash);
}
-static void ldlm_res_hop_get_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_resource *res;
- res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
ldlm_resource_getref(res);
}
-static void ldlm_res_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_resource *res;
- res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* cfs_hash_for_each_nolock is the only path from which this is called */
ldlm_resource_putref_locked(res);
}
-static void ldlm_res_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct ldlm_resource *res;
- res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
ldlm_resource_putref(res);
}
ns->ns_appetite = apt;
ns->ns_client = client;
- CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
- CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
+ INIT_LIST_HEAD(&ns->ns_list_chain);
+ INIT_LIST_HEAD(&ns->ns_unused_list);
spin_lock_init(&ns->ns_lock);
atomic_set(&ns->ns_bref, 0);
init_waitqueue_head(&ns->ns_waitq);
* certain assumptions as a result--notably, that we shouldn't cancel
* locks with refs.
*/
-static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
+static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
__u64 flags)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
/* First, we look for a lock that has not been cleaned yet;
 * all cleaned locks are marked by the CLEANED flag. */
- lock_res(res);
- cfs_list_for_each(tmp, q) {
- lock = cfs_list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ lock_res(res);
+ list_for_each(tmp, q) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (ldlm_is_cleaned(lock)) {
- lock = NULL;
- continue;
- }
- LDLM_LOCK_GET(lock);
+ lock = NULL;
+ continue;
+ }
+ LDLM_LOCK_GET(lock);
ldlm_set_cleaned(lock);
- break;
- }
+ break;
+ }
if (lock == NULL) {
unlock_res(res);
}
static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
__u64 flags = *(__u64 *)arg;
}
static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
/* Namespace \a ns should not be on any list at this time, otherwise
 * this will cause issues related to using freed \a ns in the poold
 * thread. */
- LASSERT(cfs_list_empty(&ns->ns_list_chain));
+ LASSERT(list_empty(&ns->ns_list_chain));
OBD_FREE_PTR(ns);
ldlm_put_ref();
EXIT;
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
mutex_lock(ldlm_namespace_lock(client));
- LASSERT(cfs_list_empty(&ns->ns_list_chain));
- cfs_list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
+ LASSERT(list_empty(&ns->ns_list_chain));
+ list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
ldlm_namespace_nr_inc(client);
mutex_unlock(ldlm_namespace_lock(client));
}
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
mutex_lock(ldlm_namespace_lock(client));
- LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+ LASSERT(!list_empty(&ns->ns_list_chain));
/* Some asserts and possibly other parts of the code are still
* using list_empty(&ns->ns_list_chain). This is why it is
* important to use list_del_init() here. */
- cfs_list_del_init(&ns->ns_list_chain);
+ list_del_init(&ns->ns_list_chain);
ldlm_namespace_nr_dec(client);
mutex_unlock(ldlm_namespace_lock(client));
}
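
The comment in ldlm_namespace_unregister() is worth taking literally: plain list_del() poisons the unlinked node, so a later list_empty(&ns->ns_list_chain) test would be meaningless, while list_del_init() re-points the node at itself and keeps the emptiness checks (and the LASSERTs above) valid. A sketch with a hypothetical namespace-like struct:

#include <linux/bug.h>
#include <linux/list.h>

struct ns {
	struct list_head chain;
};

static void unregister_ns(struct ns *n)
{
	/* list_del(&n->chain) would leave LIST_POISON1/2 behind and
	 * make any later list_empty(&n->chain) check undefined. */
	list_del_init(&n->chain);

	/* Safe: the node is self-linked again. */
	BUG_ON(!list_empty(&n->chain));
}
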
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
ldlm_side_t client)
{
- LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+ LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
+ list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
ldlm_side_t client)
{
- LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+ LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- cfs_list_move_tail(&ns->ns_list_chain,
- ldlm_namespace_inactive_list(client));
+ list_move_tail(&ns->ns_list_chain,
+ ldlm_namespace_inactive_list(client));
}
/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
+ LASSERT(!list_empty(ldlm_namespace_list(client)));
return container_of(ldlm_namespace_list(client)->next,
struct ldlm_namespace, ns_list_chain);
}
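
ldlm_namespace_first_locked() spells out by hand what list.h already wraps: container_of() on a non-empty head's ->next is exactly list_first_entry(). A sketch (hypothetical struct):

#include <linux/list.h>

struct ns {
	struct list_head chain;
};

static struct ns *first_ns(struct list_head *head)
{
	/* Caller must guarantee !list_empty(head), as the LASSERT
	 * above does; on an empty list this would hand back the head
	 * itself reinterpreted as a struct ns. */
	return list_first_entry(head, struct ns, chain);
	/* == container_of(head->next, struct ns, chain) */
}
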
if (res == NULL)
return NULL;
- CFS_INIT_LIST_HEAD(&res->lr_granted);
- CFS_INIT_LIST_HEAD(&res->lr_converting);
- CFS_INIT_LIST_HEAD(&res->lr_waiting);
+ INIT_LIST_HEAD(&res->lr_granted);
+ INIT_LIST_HEAD(&res->lr_converting);
+ INIT_LIST_HEAD(&res->lr_waiting);
/* Initialize interval trees for each lock mode. */
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
{
struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
- if (!cfs_list_empty(&res->lr_granted)) {
+ if (!list_empty(&res->lr_granted)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
- if (!cfs_list_empty(&res->lr_converting)) {
+ if (!list_empty(&res->lr_converting)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
- if (!cfs_list_empty(&res->lr_waiting)) {
+ if (!list_empty(&res->lr_waiting)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
/**
* Add a lock into a given resource into specified lock list.
*/
-void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
struct ldlm_lock *lock)
{
check_res_locked(res);
return;
}
- LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_res_link));
- cfs_list_add_tail(&lock->l_res_link, head);
+ list_add_tail(&lock->l_res_link, head);
}
/**
goto out;
}
- LASSERT(cfs_list_empty(&new->l_res_link));
+ LASSERT(list_empty(&new->l_res_link));
- cfs_list_add(&new->l_res_link, &original->l_res_link);
+ list_add(&new->l_res_link, &original->l_res_link);
out:;
}
ldlm_unlink_lock_skiplist(lock);
else if (type == LDLM_EXTENT)
ldlm_extent_unlink_lock(lock);
- cfs_list_del_init(&lock->l_res_link);
+ list_del_init(&lock->l_res_link);
}
EXPORT_SYMBOL(ldlm_resource_unlink_lock);
*/
void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
if (!((libcfs_debug | D_ERROR) & level))
return;
mutex_lock(ldlm_namespace_lock(client));
- cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
+ list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
- ns = cfs_list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+ ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
ldlm_namespace_dump(level, ns);
}
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
int level = (int)(unsigned long)arg;
CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
PLDLMRES(res), res, atomic_read(&res->lr_refcount));
- if (!cfs_list_empty(&res->lr_granted)) {
+ if (!list_empty(&res->lr_granted)) {
CDEBUG(level, "Granted locks (in reverse order):\n");
- cfs_list_for_each_entry_reverse(lock, &res->lr_granted,
+ list_for_each_entry_reverse(lock, &res->lr_granted,
l_res_link) {
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
}
}
}
- if (!cfs_list_empty(&res->lr_converting)) {
+ if (!list_empty(&res->lr_converting)) {
CDEBUG(level, "Converting locks:\n");
- cfs_list_for_each_entry(lock, &res->lr_converting, l_res_link)
+ list_for_each_entry(lock, &res->lr_converting, l_res_link)
LDLM_DEBUG_LIMIT(level, lock, "###");
}
- if (!cfs_list_empty(&res->lr_waiting)) {
+ if (!list_empty(&res->lr_waiting)) {
CDEBUG(level, "Waiting locks:\n");
- cfs_list_for_each_entry(lock, &res->lr_waiting, l_res_link)
+ list_for_each_entry(lock, &res->lr_waiting, l_res_link)
LDLM_DEBUG_LIMIT(level, lock, "###");
}
}