LU-3963 ldlm: convert to linux list api 45/10945/4
author     James Simmons <uja.ornl@gmail.com>
           Tue, 19 Aug 2014 14:00:03 +0000 (10:00 -0400)
committer  Oleg Drokin <oleg.drokin@intel.com>
           Mon, 25 Aug 2014 04:24:18 +0000 (04:24 +0000)
Move all of the ldlm code from the cfs_[h]list API to the
native Linux list API.

Change-Id: Ibedd3870c530318dc4cba6a27dfb7005e1961ece
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Reviewed-on: http://review.whamcloud.com/10945
Tested-by: Jenkins
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: frank zago <fzago@cray.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/ldlm/ldlm_extent.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_inodebits.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_plain.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
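
The conversion is almost entirely mechanical: every cfs_list_* and CFS_*_HEAD wrapper maps one-to-one onto its <linux/list.h> counterpart, and most open-coded cfs_list_for_each() + cfs_list_entry() pairs collapse into list_for_each_entry(). A minimal sketch of the core pattern, using hypothetical struct and function names rather than anything from the patch:

        #include <linux/list.h>

        struct item {
                int                     val;
                struct list_head        link;   /* was cfs_list_t link; */
        };

        /* Before the conversion this loop needed an explicit cursor:
         *
         *      cfs_list_t *tmp;
         *      cfs_list_for_each(tmp, head) {
         *              struct item *it = cfs_list_entry(tmp, struct item, link);
         *              ...
         *      }
         *
         * The native entry iterator hides the cursor entirely. */
        static int sum_items(struct list_head *head)
        {
                struct item *it;
                int sum = 0;

                list_for_each_entry(it, head, link)
                        sum += it->val;
                return sum;
        }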

diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c
index c9f1618..f2aabd6 100644
@@ -187,29 +187,25 @@ static void
 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                     struct ldlm_extent *new_ex)
 {
-        cfs_list_t *tmp;
-        struct ldlm_resource *res = req->l_resource;
-        ldlm_mode_t req_mode = req->l_req_mode;
-        __u64 req_start = req->l_req_extent.start;
-        __u64 req_end = req->l_req_extent.end;
-        int conflicting = 0;
-        ENTRY;
-
-        lockmode_verify(req_mode);
+       struct ldlm_resource *res = req->l_resource;
+       ldlm_mode_t req_mode = req->l_req_mode;
+       __u64 req_start = req->l_req_extent.start;
+       __u64 req_end = req->l_req_extent.end;
+       struct ldlm_lock *lock;
+       int conflicting = 0;
+       ENTRY;
 
-        /* for waiting locks */
-        cfs_list_for_each(tmp, &res->lr_waiting) {
-                struct ldlm_lock *lock;
-                struct ldlm_extent *l_extent;
+       lockmode_verify(req_mode);
 
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
-                l_extent = &lock->l_policy_data.l_extent;
+       /* for waiting locks */
+       list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
+               struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
 
-                /* We already hit the minimum requested size, search no more */
-                if (new_ex->start == req_start && new_ex->end == req_end) {
-                        EXIT;
-                        return;
-                }
+               /* We already hit the minimum requested size, search no more */
+               if (new_ex->start == req_start && new_ex->end == req_end) {
+                       EXIT;
+                       return;
+               }
 
                 /* Don't conflict with ourselves */
                 if (req == lock)
@@ -319,7 +315,7 @@ static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
 }
 
 struct ldlm_extent_compat_args {
-        cfs_list_t *work_list;
+       struct list_head *work_list;
         struct ldlm_lock *lock;
         ldlm_mode_t mode;
         int *locks;
@@ -332,15 +328,15 @@ static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
         struct ldlm_extent_compat_args *priv = data;
         struct ldlm_interval *node = to_ldlm_interval(n);
         struct ldlm_extent *extent;
-        cfs_list_t *work_list = priv->work_list;
+       struct list_head *work_list = priv->work_list;
         struct ldlm_lock *lock, *enq = priv->lock;
         ldlm_mode_t mode = priv->mode;
         int count = 0;
         ENTRY;
 
-        LASSERT(!cfs_list_empty(&node->li_group));
+       LASSERT(!list_empty(&node->li_group));
 
-        cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+       list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                 /* interval tree is for granted lock */
                 LASSERTF(mode == lock->l_granted_mode,
                          "mode = %s, lock->l_granted_mode = %s\n",
@@ -376,20 +372,19 @@ static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
  * \retval negative error, such as EWOULDBLOCK for group locks
  */
 static int
-ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         __u64 *flags, ldlm_error_t *err,
-                        cfs_list_t *work_list, int *contended_locks)
+                        struct list_head *work_list, int *contended_locks)
 {
-        cfs_list_t *tmp;
-        struct ldlm_lock *lock;
-        struct ldlm_resource *res = req->l_resource;
-        ldlm_mode_t req_mode = req->l_req_mode;
-        __u64 req_start = req->l_req_extent.start;
-        __u64 req_end = req->l_req_extent.end;
-        int compat = 1;
-        int scan = 0;
-        int check_contention;
-        ENTRY;
+       struct ldlm_resource *res = req->l_resource;
+       ldlm_mode_t req_mode = req->l_req_mode;
+       __u64 req_start = req->l_req_extent.start;
+       __u64 req_end = req->l_req_extent.end;
+       struct ldlm_lock *lock;
+       int check_contention;
+       int compat = 1;
+       int scan = 0;
+       ENTRY;
 
         lockmode_verify(req_mode);
 
@@ -451,17 +446,14 @@ ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                         } else {
                                 interval_search(tree->lit_root, &ex,
                                                 ldlm_extent_compat_cb, &data);
-                                if (!cfs_list_empty(work_list) && compat)
+                               if (!list_empty(work_list) && compat)
                                         compat = 0;
                         }
                 }
         } else { /* for waiting queue */
-                cfs_list_for_each(tmp, queue) {
+               list_for_each_entry(lock, queue, l_res_link) {
                         check_contention = 1;
 
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
-                                              l_res_link);
-
                        /* We stop walking the queue if we hit ourselves so
                         * we don't take conflicting locks enqueued after us
                         * into account, or we'd wait forever. */
@@ -480,7 +472,7 @@ ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                                          * front of first non-GROUP lock */
 
                                         ldlm_resource_insert_lock_after(lock, req);
-                                        cfs_list_del_init(&lock->l_res_link);
+                                       list_del_init(&lock->l_res_link);
                                         ldlm_resource_insert_lock_after(req, lock);
                                         compat = 0;
                                         break;
@@ -575,7 +567,7 @@ ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                                            first non-GROUP lock */
 
                                         ldlm_resource_insert_lock_after(lock, req);
-                                        cfs_list_del_init(&lock->l_res_link);
+                                       list_del_init(&lock->l_res_link);
                                         ldlm_resource_insert_lock_after(req, lock);
                                         break;
                                 }
@@ -635,7 +627,7 @@ ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
 
         RETURN(compat);
 destroylock:
-        cfs_list_del_init(&req->l_res_link);
+       list_del_init(&req->l_res_link);
         ldlm_lock_destroy_nolock(req);
         *err = compat;
         RETURN(compat);
@@ -647,16 +639,16 @@ destroylock:
  * If for whatever reason we do not want to send ASTs to conflicting locks
  * anymore, disassemble the list with this function.
  */
-static void discard_bl_list(cfs_list_t *bl_list)
+static void discard_bl_list(struct list_head *bl_list)
 {
-        cfs_list_t *tmp, *pos;
+       struct list_head *tmp, *pos;
         ENTRY;
 
-        cfs_list_for_each_safe(pos, tmp, bl_list) {
+       list_for_each_safe(pos, tmp, bl_list) {
                 struct ldlm_lock *lock =
-                        cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
+                       list_entry(pos, struct ldlm_lock, l_bl_ast);
 
-                cfs_list_del_init(&lock->l_bl_ast);
+               list_del_init(&lock->l_bl_ast);
                LASSERT(ldlm_is_ast_sent(lock));
                ldlm_clear_ast_sent(lock);
                 LASSERT(lock->l_bl_ast_run == 0);
@@ -685,20 +677,21 @@ static void discard_bl_list(cfs_list_t *bl_list)
  */
 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                             int first_enq, ldlm_error_t *err,
-                            cfs_list_t *work_list)
+                            struct list_head *work_list)
 {
-        struct ldlm_resource *res = lock->l_resource;
-        CFS_LIST_HEAD(rpc_list);
-        int rc, rc2;
-        int contended_locks = 0;
-        ENTRY;
+       struct ldlm_resource *res = lock->l_resource;
+       struct list_head rpc_list;
+       int rc, rc2;
+       int contended_locks = 0;
+       ENTRY;
 
        LASSERT(lock->l_granted_mode != lock->l_req_mode);
-        LASSERT(cfs_list_empty(&res->lr_converting));
-        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
+       LASSERT(list_empty(&res->lr_converting));
+       LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !ldlm_is_ast_discard_data(lock));
-        check_res_locked(res);
-        *err = ELDLM_OK;
+       INIT_LIST_HEAD(&rpc_list);
+       check_res_locked(res);
+       *err = ELDLM_OK;
 
         if (!first_enq) {
                 /* Careful observers will note that we don't handle -EWOULDBLOCK
@@ -751,7 +744,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (cfs_list_empty(&lock->l_res_link))
+               if (list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
@@ -795,7 +788,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
        }
        RETURN(0);
 out:
-       if (!cfs_list_empty(&rpc_list)) {
+       if (!list_empty(&rpc_list)) {
                LASSERT(!ldlm_is_ast_discard_data(lock));
                discard_bl_list(&rpc_list);
        }
@@ -811,7 +804,7 @@ out:
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 {
         struct ldlm_resource *res = lock->l_resource;
-        cfs_list_t *tmp;
+       struct list_head *tmp;
         struct ldlm_lock *lck;
         __u64 kms = 0;
         ENTRY;
@@ -821,8 +814,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
          * calculation of the kms */
        ldlm_set_kms_ignore(lock);
 
-        cfs_list_for_each(tmp, &res->lr_granted) {
-                lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each(tmp, &res->lr_granted) {
+               lck = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                if (ldlm_is_kms_ignore(lck))
                         continue;
@@ -852,7 +845,7 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
        if (node == NULL)
                RETURN(NULL);
 
-       CFS_INIT_LIST_HEAD(&node->li_group);
+       INIT_LIST_HEAD(&node->li_group);
        ldlm_interval_attach(node, lock);
        RETURN(node);
 }
@@ -860,7 +853,7 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
 void ldlm_interval_free(struct ldlm_interval *node)
 {
         if (node) {
-                LASSERT(cfs_list_empty(&node->li_group));
+               LASSERT(list_empty(&node->li_group));
                 LASSERT(!interval_is_intree(&node->li_node));
                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
         }
@@ -873,7 +866,7 @@ void ldlm_interval_attach(struct ldlm_interval *n,
         LASSERT(l->l_tree_node == NULL);
         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
 
-        cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
+       list_add_tail(&l->l_sl_policy, &n->li_group);
         l->l_tree_node = n;
 }
 
@@ -884,11 +877,11 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
         if (n == NULL)
                 return NULL;
 
-        LASSERT(!cfs_list_empty(&n->li_group));
+       LASSERT(!list_empty(&n->li_group));
         l->l_tree_node = NULL;
-        cfs_list_del_init(&l->l_sl_policy);
+       list_del_init(&l->l_sl_policy);
 
-        return (cfs_list_empty(&n->li_group) ? n : NULL);
+       return list_empty(&n->li_group) ? n : NULL;
 }
 
 static inline int lock_mode_to_index(ldlm_mode_t mode)
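
One substitution above is less than one-to-one: CFS_LIST_HEAD(rpc_list) declared and initialized an on-stack list head in a single statement. The native LIST_HEAD() macro does the same thing, but the patch instead splits the declaration (struct list_head rpc_list;) from a later INIT_LIST_HEAD(&rpc_list), presumably to keep the declaration block plain ahead of ENTRY. Both forms are equivalent; a sketch with a hypothetical function:

        #include <linux/list.h>

        static void list_head_forms(void)
        {
                /* One-statement form; expands to
                 * "struct list_head a = LIST_HEAD_INIT(a)". */
                LIST_HEAD(a);

                /* Split form, as used throughout this patch: plain
                 * declaration now, explicit initialization later. */
                struct list_head b;

                INIT_LIST_HEAD(&b);

                /* Either way the head points at itself, so both
                 * list_empty(&a) and list_empty(&b) are true here. */
        }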
diff --git a/lustre/ldlm/ldlm_flock.c b/lustre/ldlm/ldlm_flock.c
index 59e5b74..268617d 100644
@@ -107,7 +107,7 @@ static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
         if (req->l_export == NULL)
                return;
 
-       LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));
+       LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
 
         req->l_policy_data.l_flock.blocking_owner =
                 lock->l_policy_data.l_flock.owner;
@@ -128,7 +128,7 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
 
        check_res_locked(req->l_resource);
        if (req->l_export->exp_flock_hash != NULL &&
-           !cfs_hlist_unhashed(&req->l_exp_flock_hash))
+           !hlist_unhashed(&req->l_exp_flock_hash))
                cfs_hash_del(req->l_export->exp_flock_hash,
                             &req->l_policy_data.l_flock.owner,
                             &req->l_exp_flock_hash);
@@ -143,9 +143,9 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
                   mode, flags);
 
        /* Safe to not lock here, since it should be empty anyway */
-       LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
+       LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
 
-       cfs_list_del_init(&lock->l_res_link);
+       list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -176,7 +176,7 @@ struct ldlm_flock_lookup_cb_data {
 };
 
 static int ldlm_flock_lookup_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                               cfs_hlist_node_t *hnode, void *data)
+                               struct hlist_node *hnode, void *data)
 {
        struct ldlm_flock_lookup_cb_data *cb_data = data;
        struct obd_export *exp = cfs_hash_object(hs, hnode);
@@ -253,7 +253,7 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
 }
 
 static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
-                                                cfs_list_t *work_list)
+                                         struct list_head *work_list)
 {
        CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
 
@@ -292,12 +292,12 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
  */
 int
 ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
-                       ldlm_error_t *err, cfs_list_t *work_list)
+                       ldlm_error_t *err, struct list_head *work_list)
 {
         struct ldlm_resource *res = req->l_resource;
         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-        cfs_list_t *tmp;
-        cfs_list_t *ownlocks = NULL;
+       struct list_head *tmp;
+       struct list_head *ownlocks = NULL;
         struct ldlm_lock *lock = NULL;
         struct ldlm_lock *new = req;
         struct ldlm_lock *new2 = NULL;
@@ -331,8 +331,8 @@ reprocess:
         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                 /* This loop determines where this processes locks start
                  * in the resource lr_granted list. */
-                cfs_list_for_each(tmp, &res->lr_granted) {
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+               list_for_each(tmp, &res->lr_granted) {
+                       lock = list_entry(tmp, struct ldlm_lock,
                                               l_res_link);
                         if (ldlm_same_flock_owner(lock, req)) {
                                 ownlocks = tmp;
@@ -345,8 +345,8 @@ reprocess:
 
                 /* This loop determines if there are existing locks
                  * that conflict with the new lock request. */
-                cfs_list_for_each(tmp, &res->lr_granted) {
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+               list_for_each(tmp, &res->lr_granted) {
+                       lock = list_entry(tmp, struct ldlm_lock,
                                               l_res_link);
 
                         if (ldlm_same_flock_owner(lock, req)) {
@@ -428,7 +428,7 @@ reprocess:
                 ownlocks = &res->lr_granted;
 
         list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
-                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
+               lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
 
                 if (!ldlm_same_flock_owner(lock, new))
                         break;
@@ -548,7 +548,7 @@ reprocess:
                 if (lock->l_export != NULL) {
                         new2->l_export = class_export_lock_get(lock->l_export, new2);
                         if (new2->l_export->exp_lock_hash &&
-                            cfs_hlist_unhashed(&new2->l_exp_hash))
+                           hlist_unhashed(&new2->l_exp_hash))
                                 cfs_hash_add(new2->l_export->exp_lock_hash,
                                              &new2->l_remote_handle,
                                              &new2->l_exp_hash);
@@ -572,7 +572,7 @@ reprocess:
 
         /* Add req to the granted queue before calling ldlm_reprocess_all(). */
         if (!added) {
-                cfs_list_del_init(&req->l_res_link);
+               list_del_init(&req->l_res_link);
                 /* insert new lock before ownlocks in list. */
                 ldlm_resource_add_lock(res, ownlocks, req);
         }
@@ -589,9 +589,11 @@ reprocess:
                          * note that ldlm_process_flock_lock() will recurse,
                          * but only once because first_enq will be false from
                          * ldlm_reprocess_queue. */
-                        if ((mode == LCK_NL) && overlaps) {
-                                CFS_LIST_HEAD(rpc_list);
+                       if ((mode == LCK_NL) && overlaps) {
+                               struct list_head rpc_list;
                                 int rc;
+
+                               INIT_LIST_HEAD(&rpc_list);
 restart:
                                 ldlm_reprocess_queue(res, &res->lr_waiting,
                                                      &rpc_list);
@@ -888,33 +890,33 @@ ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
 }
 
 static void *
-ldlm_export_flock_key(cfs_hlist_node_t *hnode)
+ldlm_export_flock_key(struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
 
-       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        return &lock->l_policy_data.l_flock.owner;
 }
 
 static int
-ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
 {
        return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
 }
 
 static void *
-ldlm_export_flock_object(cfs_hlist_node_t *hnode)
+ldlm_export_flock_object(struct hlist_node *hnode)
 {
-       return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
 }
 
 static void
-ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
 
-       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        LDLM_LOCK_GET(lock);
 
        flock = &lock->l_policy_data.l_flock;
@@ -924,12 +926,12 @@ ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 }
 
 static void
-ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
 
-       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        LDLM_LOCK_RELEASE(lock);
 
        flock = &lock->l_policy_data.l_flock;
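
The flock changes also retire the cfs_hlist_* wrappers: the cfs_hash callback signatures take a native struct hlist_node, and cfs_hlist_entry()/cfs_hlist_unhashed() become hlist_entry()/hlist_unhashed(). A reduced sketch of that shape, with a hypothetical struct rather than the real ldlm_lock layout:

        #include <linux/list.h>
        #include <linux/types.h>

        struct entry {
                __u64                   owner;
                struct hlist_node       hnode;  /* was cfs_hlist_node_t */
        };

        /* Typical object callback after the conversion: map the hash
         * node back to its containing object. */
        static void *entry_object(struct hlist_node *hnode)
        {
                return hlist_entry(hnode, struct entry, hnode);
        }

        /* hlist_unhashed() is the idiomatic "not in any table" test;
         * the node must have been initialized (INIT_HLIST_NODE) for
         * the test to be meaningful. */
        static bool entry_hashed(struct entry *e)
        {
                return !hlist_unhashed(&e->hnode);
        }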
diff --git a/lustre/ldlm/ldlm_inodebits.c b/lustre/ldlm/ldlm_inodebits.c
index e43cbea..b7dc3a9 100644
  * locks if first lock of the bunch is not conflicting with us.
  */
 static int
-ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
-                            cfs_list_t *work_list)
+ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+                           struct list_head *work_list)
 {
-        cfs_list_t *tmp;
+       struct list_head *tmp;
         struct ldlm_lock *lock;
         ldlm_mode_t req_mode = req->l_req_mode;
         __u64 req_bits = req->l_policy_data.l_inodebits.bits;
@@ -93,10 +93,10 @@ ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                               I think. Also such a lock would be compatible
                                with any other bit lock */
 
-        cfs_list_for_each(tmp, queue) {
-                cfs_list_t *mode_tail;
+       list_for_each(tmp, queue) {
+               struct list_head *mode_tail;
 
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                /* We stop walking the queue if we hit ourselves so we don't
                 * take conflicting locks enqueued after us into account,
@@ -106,7 +106,7 @@ ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
 
                 /* last lock in mode group */
                 LASSERT(lock->l_sl_mode.prev != NULL);
-                mode_tail = &cfs_list_entry(lock->l_sl_mode.prev,
+               mode_tail = &list_entry(lock->l_sl_mode.prev,
                                             struct ldlm_lock,
                                             l_sl_mode)->l_res_link;
 
@@ -118,10 +118,10 @@ ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                 }
 
                 for (;;) {
-                        cfs_list_t *head;
+                       struct list_head *head;
 
                        /* Advance loop cursor to last lock in policy group. */
-                       tmp = &cfs_list_entry(lock->l_sl_policy.prev,
+                       tmp = &list_entry(lock->l_sl_policy.prev,
                                              struct ldlm_lock,
                                              l_sl_policy)->l_res_link;
 
@@ -145,7 +145,7 @@ ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                                         ldlm_add_ast_work_item(lock, req,
                                                                work_list);
                                 head = &lock->l_sl_policy;
-                                cfs_list_for_each_entry(lock, head, l_sl_policy)
+                               list_for_each_entry(lock, head, l_sl_policy)
                                         if (lock->l_blocking_ast)
                                                 ldlm_add_ast_work_item(lock, req,
                                                                        work_list);
@@ -155,7 +155,7 @@ ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                                 break;
 
                         tmp = tmp->next;
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                       lock = list_entry(tmp, struct ldlm_lock,
                                               l_res_link);
                } /* Loop over policy groups within one mode group. */
        } /* Loop over mode groups within @queue. */
@@ -180,16 +180,17 @@ ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
  */
 int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
                                 int first_enq, ldlm_error_t *err,
-                                cfs_list_t *work_list)
+                               struct list_head *work_list)
 {
-        struct ldlm_resource *res = lock->l_resource;
-        CFS_LIST_HEAD(rpc_list);
-        int rc;
-        ENTRY;
+       struct ldlm_resource *res = lock->l_resource;
+       struct list_head rpc_list;
+       int rc;
+       ENTRY;
 
        LASSERT(lock->l_granted_mode != lock->l_req_mode);
-        LASSERT(cfs_list_empty(&res->lr_converting));
-        check_res_locked(res);
+       LASSERT(list_empty(&res->lr_converting));
+       INIT_LIST_HEAD(&rpc_list);
+       check_res_locked(res);
 
        /* (*flags & LDLM_FL_BLOCK_NOWAIT) is for layout lock right now. */
         if (!first_enq || (*flags & LDLM_FL_BLOCK_NOWAIT)) {
@@ -222,7 +223,7 @@ int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (cfs_list_empty(&lock->l_res_link))
+               if (list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
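
Note that ldlm_inodebits_compat_queue() above keeps the explicit-cursor form, list_for_each() plus list_entry(), instead of switching to list_for_each_entry(): its loop body repositions tmp by hand to jump to the tail of a mode or policy group, and the entry iterator owns its cursor, so that kind of jump does not fit it. A minimal illustration of the distinction, with hypothetical types:

        #include <linux/list.h>

        struct node {
                struct list_head        link;   /* main queue linkage */
                struct list_head        group;  /* skip-list group linkage */
        };

        static void walk_groups(struct list_head *head)
        {
                struct list_head *tmp;

                list_for_each(tmp, head) {
                        struct node *n = list_entry(tmp, struct node, link);

                        /* Legal with an explicit cursor: move tmp to the
                         * last member of n's group, so the next iteration
                         * resumes after the whole group.  Inside
                         * list_for_each_entry() the cursor is the entry
                         * variable itself and this jump would be fragile. */
                        tmp = &list_entry(n->group.prev, struct node,
                                          group)->link;
                }
        }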
diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index aa2e34f..db32d47 100644
 extern int ldlm_srv_namespace_nr;
 extern int ldlm_cli_namespace_nr;
 extern struct mutex ldlm_srv_namespace_lock;
-extern cfs_list_t ldlm_srv_namespace_list;
+extern struct list_head ldlm_srv_namespace_list;
 extern struct mutex ldlm_cli_namespace_lock;
-extern cfs_list_t ldlm_cli_active_namespace_list;
-extern cfs_list_t ldlm_cli_inactive_namespace_list;
+extern struct list_head ldlm_cli_active_namespace_list;
+extern struct list_head ldlm_cli_inactive_namespace_list;
 
 static inline int ldlm_namespace_nr_read(ldlm_side_t client)
 {
@@ -66,13 +66,13 @@ static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
                ldlm_cli_namespace_nr--;
 }
 
-static inline cfs_list_t *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
 {
         return client == LDLM_NAMESPACE_SERVER ?
                &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
 }
 
-static inline cfs_list_t *ldlm_namespace_inactive_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
 {
         return client == LDLM_NAMESPACE_SERVER ?
                &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
@@ -108,7 +108,7 @@ enum {
 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
                    ldlm_cancel_flags_t sync, int flags);
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
-                          cfs_list_t *cancels, int count, int max,
+                         struct list_head *cancels, int count, int max,
                           ldlm_cancel_flags_t cancel_flags, int flags);
 extern int ldlm_enqueue_min;
 int ldlm_get_enq_timeout(struct ldlm_lock *lock);
@@ -126,7 +126,7 @@ struct ldlm_cb_set_arg {
        struct ptlrpc_request_set       *set;
        int                              type; /* LDLM_{CP,BL,GL}_CALLBACK */
        atomic_t                         restart;
-       cfs_list_t                      *list;
+       struct list_head                        *list;
        union ldlm_gl_desc              *gl_desc; /* glimpse AST descriptor */
 };
 
@@ -137,7 +137,7 @@ typedef enum {
        LDLM_WORK_GL_AST
 } ldlm_desc_ast_t;
 
-void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list);
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                  enum req_location loc, void *data, int size);
 struct ldlm_lock *
@@ -152,12 +152,12 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                            cfs_list_t *work_list);
+                           struct list_head *work_list);
 #ifdef HAVE_SERVER_SUPPORT
-int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
-                         cfs_list_t *work_list);
+int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
+                        struct list_head *work_list);
 #endif
-int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
                       ldlm_desc_ast_t ast_type);
 int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
@@ -174,7 +174,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                            struct ldlm_lock *lock);
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
                           struct ldlm_lock_desc *ld,
-                          cfs_list_t *cancels, int count,
+                          struct list_head *cancels, int count,
                           ldlm_cancel_flags_t cancel_flags);
 
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
@@ -184,19 +184,19 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
 /* ldlm_plain.c */
 int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
                            int first_enq, ldlm_error_t *err,
-                           cfs_list_t *work_list);
+                           struct list_head *work_list);
 
 /* ldlm_inodebits.c */
 int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
                                 int first_enq, ldlm_error_t *err,
-                                cfs_list_t *work_list);
+                               struct list_head *work_list);
 #endif
 
 /* ldlm_extent.c */
 #ifdef HAVE_SERVER_SUPPORT
 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                             int first_enq, ldlm_error_t *err,
-                            cfs_list_t *work_list);
+                            struct list_head *work_list);
 #endif
 void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
 void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
@@ -204,7 +204,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
 /* ldlm_flock.c */
 int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
                            int first_enq, ldlm_error_t *err,
-                           cfs_list_t *work_list);
+                           struct list_head *work_list);
 int ldlm_init_flock_export(struct obd_export *exp);
 void ldlm_destroy_flock_export(struct obd_export *exp);
 
@@ -234,9 +234,9 @@ static inline struct ldlm_extent *
 ldlm_interval_extent(struct ldlm_interval *node)
 {
         struct ldlm_lock *lock;
-        LASSERT(!cfs_list_empty(&node->li_group));
+       LASSERT(!list_empty(&node->li_group));
 
-        lock = cfs_list_entry(node->li_group.next, struct ldlm_lock,
+       lock = list_entry(node->li_group.next, struct ldlm_lock,
                               l_sl_policy);
         return &lock->l_policy_data.l_extent;
 }
diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c
index cc7e67d..f0f94d4 100644
@@ -85,11 +85,11 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
         }
 
        spin_lock(&imp->imp_lock);
-        cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
+       list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
                 if (obd_uuid_equals(uuid, &item->oic_uuid)) {
                         if (priority) {
-                                cfs_list_del(&item->oic_item);
-                                cfs_list_add(&item->oic_item,
+                               list_del(&item->oic_item);
+                               list_add(&item->oic_item,
                                              &imp->imp_conn_list);
                                 item->oic_last_attempt = 0;
                         }
@@ -106,9 +106,9 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                 imp_conn->oic_uuid = *uuid;
                 imp_conn->oic_last_attempt = 0;
                 if (priority)
-                        cfs_list_add(&imp_conn->oic_item, &imp->imp_conn_list);
+                       list_add(&imp_conn->oic_item, &imp->imp_conn_list);
                 else
-                        cfs_list_add_tail(&imp_conn->oic_item,
+                       list_add_tail(&imp_conn->oic_item,
                                           &imp->imp_conn_list);
                 CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
                        imp, imp->imp_obd->obd_name, uuid->uuid,
@@ -148,12 +148,12 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
        ENTRY;
 
        spin_lock(&imp->imp_lock);
-        if (cfs_list_empty(&imp->imp_conn_list)) {
+       if (list_empty(&imp->imp_conn_list)) {
                 LASSERT(!imp->imp_connection);
                 GOTO(out, rc);
         }
 
-        cfs_list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
+       list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
                 if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
                         continue;
                 LASSERT(imp_conn->oic_conn);
@@ -179,7 +179,7 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
                         }
                 }
 
-                cfs_list_del(&imp_conn->oic_item);
+               list_del(&imp_conn->oic_item);
                 ptlrpc_connection_put(imp_conn->oic_conn);
                 OBD_FREE(imp_conn, sizeof(*imp_conn));
                 CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
@@ -207,7 +207,7 @@ int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
        ENTRY;
 
        spin_lock(&imp->imp_lock);
-        cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+       list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
                /* Check if conn UUID does have this peer NID. */
                 if (class_check_uuid(&conn->oic_uuid, peer)) {
                         *uuid = conn->oic_uuid;
@@ -352,11 +352,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        /* cl_dirty_max_pages may be changed at connect time in
         * ptlrpc_connect_interpret(). */
        client_adjust_max_dirty(cli);
-       CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
-       CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
-       CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
-       CFS_INIT_LIST_HEAD(&cli->cl_loi_write_list);
-       CFS_INIT_LIST_HEAD(&cli->cl_loi_read_list);
+       INIT_LIST_HEAD(&cli->cl_cache_waiters);
+       INIT_LIST_HEAD(&cli->cl_loi_ready_list);
+       INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
+       INIT_LIST_HEAD(&cli->cl_loi_write_list);
+       INIT_LIST_HEAD(&cli->cl_loi_read_list);
        client_obd_list_lock_init(&cli->cl_loi_list_lock);
        atomic_set(&cli->cl_pending_w_pages, 0);
        atomic_set(&cli->cl_pending_r_pages, 0);
@@ -371,11 +371,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
 
        /* lru for osc. */
-       CFS_INIT_LIST_HEAD(&cli->cl_lru_osc);
+       INIT_LIST_HEAD(&cli->cl_lru_osc);
        atomic_set(&cli->cl_lru_shrinkers, 0);
        atomic_set(&cli->cl_lru_busy, 0);
        atomic_set(&cli->cl_lru_in_list, 0);
-       CFS_INIT_LIST_HEAD(&cli->cl_lru_list);
+       INIT_LIST_HEAD(&cli->cl_lru_list);
        client_obd_list_lock_init(&cli->cl_lru_list_lock);
        atomic_set(&cli->cl_unstable_count, 0);
 
@@ -656,15 +656,15 @@ int server_disconnect_export(struct obd_export *exp)
 
         /* complete all outstanding replies */
        spin_lock(&exp->exp_lock);
-       while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
+       while (!list_empty(&exp->exp_outstanding_replies)) {
                struct ptlrpc_reply_state *rs =
-                       cfs_list_entry(exp->exp_outstanding_replies.next,
+                       list_entry(exp->exp_outstanding_replies.next,
                                       struct ptlrpc_reply_state, rs_exp_list);
                struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
 
                spin_lock(&svcpt->scp_rep_lock);
 
-               cfs_list_del_init(&rs->rs_exp_list);
+               list_del_init(&rs->rs_exp_list);
                spin_lock(&rs->rs_lock);
                ptlrpc_schedule_difficult_reply(rs);
                spin_unlock(&rs->rs_lock);
@@ -1136,7 +1136,7 @@ dont_check_exports:
                spin_unlock(&export->exp_lock);
 
                spin_lock(&target->obd_dev_lock);
-               cfs_list_del_init(&export->exp_obd_chain_timed);
+               list_del_init(&export->exp_obd_chain_timed);
                spin_unlock(&target->obd_dev_lock);
        } else {
                spin_unlock(&export->exp_lock);
@@ -1145,7 +1145,7 @@ dont_check_exports:
         if (export->exp_connection != NULL) {
                /* Check to see if connection came from another NID. */
                 if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
-                    !cfs_hlist_unhashed(&export->exp_nid_hash))
+                   !hlist_unhashed(&export->exp_nid_hash))
                         cfs_hash_del(export->exp_obd->obd_nid_hash,
                                      &export->exp_connection->c_peer.nid,
                                      &export->exp_nid_hash);
@@ -1156,7 +1156,7 @@ dont_check_exports:
         export->exp_connection = ptlrpc_connection_get(req->rq_peer,
                                                        req->rq_self,
                                                        &remote_uuid);
-        if (cfs_hlist_unhashed(&export->exp_nid_hash)) {
+       if (hlist_unhashed(&export->exp_nid_hash)) {
                 cfs_hash_add(export->exp_obd->obd_nid_hash,
                              &export->exp_connection->c_peer.nid,
                              &export->exp_nid_hash);
@@ -1320,8 +1320,8 @@ EXPORT_SYMBOL(target_destroy_export);
 static void target_request_copy_get(struct ptlrpc_request *req)
 {
        class_export_rpc_inc(req->rq_export);
-       LASSERT(cfs_list_empty(&req->rq_list));
-       CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+       LASSERT(list_empty(&req->rq_list));
+       INIT_LIST_HEAD(&req->rq_replay_list);
 
        /* Increase refcount to keep request in queue. */
        atomic_inc(&req->rq_refcount);
@@ -1331,7 +1331,7 @@ static void target_request_copy_get(struct ptlrpc_request *req)
 
 static void target_request_copy_put(struct ptlrpc_request *req)
 {
-       LASSERT(cfs_list_empty(&req->rq_replay_list));
+       LASSERT(list_empty(&req->rq_replay_list));
        LASSERT_ATOMIC_POS(&req->rq_export->exp_replay_count);
 
        atomic_dec(&req->rq_export->exp_replay_count);
@@ -1349,7 +1349,7 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
         LASSERT(exp);
 
        spin_lock(&exp->exp_lock);
-        cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
+       list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
                                 rq_replay_list) {
                 if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
                         dup = 1;
@@ -1364,7 +1364,7 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
                         CERROR("invalid flags %x of resent replay\n",
                                lustre_msg_get_flags(req->rq_reqmsg));
         } else {
-                cfs_list_add_tail(&req->rq_replay_list,
+               list_add_tail(&req->rq_replay_list,
                                   &exp->exp_req_replay_queue);
         }
 
@@ -1374,11 +1374,11 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
 
 static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
 {
-       LASSERT(!cfs_list_empty(&req->rq_replay_list));
+       LASSERT(!list_empty(&req->rq_replay_list));
        LASSERT(req->rq_export);
 
        spin_lock(&req->rq_export->exp_lock);
-       cfs_list_del_init(&req->rq_replay_list);
+       list_del_init(&req->rq_replay_list);
        spin_unlock(&req->rq_export->exp_lock);
 }
 
@@ -1402,15 +1402,15 @@ static void target_finish_recovery(struct obd_device *obd)
 
         ldlm_reprocess_all_ns(obd->obd_namespace);
        spin_lock(&obd->obd_recovery_task_lock);
-        if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
-            !cfs_list_empty(&obd->obd_lock_replay_queue) ||
-            !cfs_list_empty(&obd->obd_final_req_queue)) {
+       if (!list_empty(&obd->obd_req_replay_queue) ||
+           !list_empty(&obd->obd_lock_replay_queue) ||
+           !list_empty(&obd->obd_final_req_queue)) {
                 CERROR("%s: Recovery queues ( %s%s%s) are not empty\n",
                        obd->obd_name,
-                       cfs_list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
-                       cfs_list_empty(&obd->obd_lock_replay_queue) ? \
+                      list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
+                      list_empty(&obd->obd_lock_replay_queue) ? \
                                "" : "lock ",
-                       cfs_list_empty(&obd->obd_final_req_queue) ? \
+                      list_empty(&obd->obd_final_req_queue) ? \
                                "" : "final ");
                spin_unlock(&obd->obd_recovery_task_lock);
                LBUG();
@@ -1432,13 +1432,13 @@ static void target_finish_recovery(struct obd_device *obd)
 static void abort_req_replay_queue(struct obd_device *obd)
 {
        struct ptlrpc_request *req, *n;
-       cfs_list_t abort_list;
+       struct list_head abort_list;
 
-       CFS_INIT_LIST_HEAD(&abort_list);
+       INIT_LIST_HEAD(&abort_list);
        spin_lock(&obd->obd_recovery_task_lock);
-       cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+       list_splice_init(&obd->obd_req_replay_queue, &abort_list);
        spin_unlock(&obd->obd_recovery_task_lock);
-        cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
+       list_for_each_entry_safe(req, n, &abort_list, rq_list) {
                 DEBUG_REQ(D_WARNING, req, "aborted:");
                 req->rq_status = -ENOTCONN;
                 if (ptlrpc_error(req)) {
@@ -1453,13 +1453,13 @@ static void abort_req_replay_queue(struct obd_device *obd)
 static void abort_lock_replay_queue(struct obd_device *obd)
 {
        struct ptlrpc_request *req, *n;
-       cfs_list_t abort_list;
+       struct list_head abort_list;
 
-       CFS_INIT_LIST_HEAD(&abort_list);
+       INIT_LIST_HEAD(&abort_list);
        spin_lock(&obd->obd_recovery_task_lock);
-       cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+       list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
        spin_unlock(&obd->obd_recovery_task_lock);
-        cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
+       list_for_each_entry_safe(req, n, &abort_list, rq_list) {
                 DEBUG_REQ(D_ERROR, req, "aborted:");
                 req->rq_status = -ENOTCONN;
                 if (ptlrpc_error(req)) {
@@ -1482,10 +1482,10 @@ static void abort_lock_replay_queue(struct obd_device *obd)
 void target_cleanup_recovery(struct obd_device *obd)
 {
         struct ptlrpc_request *req, *n;
-        cfs_list_t clean_list;
+       struct list_head clean_list;
         ENTRY;
 
-        CFS_INIT_LIST_HEAD(&clean_list);
+       INIT_LIST_HEAD(&clean_list);
        spin_lock(&obd->obd_dev_lock);
        if (!obd->obd_recovering) {
                spin_unlock(&obd->obd_dev_lock);
@@ -1497,21 +1497,21 @@ void target_cleanup_recovery(struct obd_device *obd)
 
        spin_lock(&obd->obd_recovery_task_lock);
        target_cancel_recovery_timer(obd);
-       cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+       list_splice_init(&obd->obd_req_replay_queue, &clean_list);
        spin_unlock(&obd->obd_recovery_task_lock);
 
-       cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+       list_for_each_entry_safe(req, n, &clean_list, rq_list) {
                LASSERT(req->rq_reply_state == 0);
                target_exp_dequeue_req_replay(req);
                target_request_copy_put(req);
        }
 
        spin_lock(&obd->obd_recovery_task_lock);
-       cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
-       cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+       list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+       list_splice_init(&obd->obd_final_req_queue, &clean_list);
        spin_unlock(&obd->obd_recovery_task_lock);
 
-        cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
+       list_for_each_entry_safe(req, n, &clean_list, rq_list) {
                 LASSERT(req->rq_reply_state == 0);
                 target_request_copy_put(req);
         }
@@ -1699,8 +1699,8 @@ static int check_for_next_transno(struct obd_device *obd)
        ENTRY;
 
        spin_lock(&obd->obd_recovery_task_lock);
-       if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
-               req = cfs_list_entry(obd->obd_req_replay_queue.next,
+       if (!list_empty(&obd->obd_req_replay_queue)) {
+               req = list_entry(obd->obd_req_replay_queue.next,
                                     struct ptlrpc_request, rq_list);
                req_transno = lustre_msg_get_transno(req->rq_reqmsg);
        } else {
@@ -1763,7 +1763,7 @@ static int check_for_next_lock(struct obd_device *obd)
        int wake_up = 0;
 
        spin_lock(&obd->obd_recovery_task_lock);
-       if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+       if (!list_empty(&obd->obd_lock_replay_queue)) {
                CDEBUG(D_HA, "waking for next lock\n");
                wake_up = 1;
        } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
@@ -1834,15 +1834,15 @@ static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
         }
 
        spin_lock(&obd->obd_recovery_task_lock);
-       if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
-               req = cfs_list_entry(obd->obd_req_replay_queue.next,
+       if (!list_empty(&obd->obd_req_replay_queue)) {
+               req = list_entry(obd->obd_req_replay_queue.next,
                                     struct ptlrpc_request, rq_list);
-               cfs_list_del_init(&req->rq_list);
+               list_del_init(&req->rq_list);
                obd->obd_requests_queued_for_recovery--;
                spin_unlock(&obd->obd_recovery_task_lock);
        } else {
                spin_unlock(&obd->obd_recovery_task_lock);
-               LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+               LASSERT(list_empty(&obd->obd_req_replay_queue));
                LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
                /** evict exports failed VBR */
                class_disconnect_stale_exports(obd, exp_vbr_healthy);
@@ -1860,14 +1860,14 @@ static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
                abort_lock_replay_queue(obd);
 
        spin_lock(&obd->obd_recovery_task_lock);
-       if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
-               req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+       if (!list_empty(&obd->obd_lock_replay_queue)) {
+               req = list_entry(obd->obd_lock_replay_queue.next,
                                     struct ptlrpc_request, rq_list);
-               cfs_list_del_init(&req->rq_list);
+               list_del_init(&req->rq_list);
                spin_unlock(&obd->obd_recovery_task_lock);
        } else {
                spin_unlock(&obd->obd_recovery_task_lock);
-               LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
+               LASSERT(list_empty(&obd->obd_lock_replay_queue));
                LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
                /** evict exports failed VBR */
                class_disconnect_stale_exports(obd, exp_vbr_healthy);
@@ -1880,10 +1880,10 @@ static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
        struct ptlrpc_request *req = NULL;
 
        spin_lock(&obd->obd_recovery_task_lock);
-       if (!cfs_list_empty(&obd->obd_final_req_queue)) {
-               req = cfs_list_entry(obd->obd_final_req_queue.next,
+       if (!list_empty(&obd->obd_final_req_queue)) {
+               req = list_entry(obd->obd_final_req_queue.next,
                                     struct ptlrpc_request, rq_list);
-               cfs_list_del_init(&req->rq_list);
+               list_del_init(&req->rq_list);
                spin_unlock(&obd->obd_recovery_task_lock);
                if (req->rq_export->exp_in_recovery) {
                        spin_lock(&req->rq_export->exp_lock);
@@ -2207,10 +2207,10 @@ static int target_process_req_flags(struct obd_device *obd,
 int target_queue_recovery_request(struct ptlrpc_request *req,
                                   struct obd_device *obd)
 {
-        cfs_list_t *tmp;
-        int inserted = 0;
         __u64 transno = lustre_msg_get_transno(req->rq_reqmsg);
-        ENTRY;
+       struct ptlrpc_request *reqiter;
+       int inserted = 0;
+       ENTRY;
 
        if (obd->obd_recovery_data.trd_processing_task == current_pid()) {
                /* Processing the queue right now, don't re-add. */
@@ -2227,7 +2227,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                wake_up(&obd->obd_next_transno_waitq);
                spin_lock(&obd->obd_recovery_task_lock);
                if (obd->obd_recovering) {
-                       cfs_list_add_tail(&req->rq_list,
+                       list_add_tail(&req->rq_list,
                                          &obd->obd_final_req_queue);
                } else {
                        spin_unlock(&obd->obd_recovery_task_lock);
@@ -2251,7 +2251,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                        RETURN(-ENOTCONN);
                }
                LASSERT(req->rq_export->exp_lock_replay_needed);
-               cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+               list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
                spin_unlock(&obd->obd_recovery_task_lock);
                RETURN(0);
        }
@@ -2261,7 +2261,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
          * buffers (eg mdt_body, ost_body etc) have NOT been swabbed. */
 
         if (!transno) {
-                CFS_INIT_LIST_HEAD(&req->rq_list);
+               INIT_LIST_HEAD(&req->rq_list);
                 DEBUG_REQ(D_HA, req, "not queueing");
                 RETURN(1);
         }
@@ -2282,7 +2282,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
        spin_lock(&obd->obd_recovery_task_lock);
        if (transno < obd->obd_next_recovery_transno) {
                /* Processing the queue right now, don't re-add. */
-               LASSERT(cfs_list_empty(&req->rq_list));
+               LASSERT(list_empty(&req->rq_list));
                spin_unlock(&obd->obd_recovery_task_lock);
                RETURN(1);
        }
@@ -2304,18 +2304,15 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                 RETURN(0);
         }
 
-        /* XXX O(n^2) */
+       /* XXX O(n^2) */
        spin_lock(&obd->obd_recovery_task_lock);
-        LASSERT(obd->obd_recovering);
-        cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
-                struct ptlrpc_request *reqiter =
-                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
-
-                if (lustre_msg_get_transno(reqiter->rq_reqmsg) > transno) {
-                        cfs_list_add_tail(&req->rq_list, &reqiter->rq_list);
-                        inserted = 1;
-                        break;
-                }
+       LASSERT(obd->obd_recovering);
+       list_for_each_entry(reqiter, &obd->obd_req_replay_queue, rq_list) {
+               if (lustre_msg_get_transno(reqiter->rq_reqmsg) > transno) {
+                       list_add_tail(&req->rq_list, &reqiter->rq_list);
+                       inserted = 1;
+                       goto added;
+               }
 
                 if (unlikely(lustre_msg_get_transno(reqiter->rq_reqmsg) ==
                              transno)) {
@@ -2327,9 +2324,9 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                         RETURN(0);
                 }
         }
-
+added:
         if (!inserted)
-                cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
+               list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
 
         obd->obd_requests_queued_for_recovery++;
        spin_unlock(&obd->obd_recovery_task_lock);
@@ -2434,20 +2431,20 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
         }
 
         /* must be an export if locks saved */
-        LASSERT (req->rq_export != NULL);
+       LASSERT(req->rq_export != NULL);
         /* req/reply consistent */
        LASSERT(rs->rs_svcpt == svcpt);
 
         /* "fresh" reply */
-        LASSERT (!rs->rs_scheduled);
-        LASSERT (!rs->rs_scheduled_ever);
-        LASSERT (!rs->rs_handled);
-        LASSERT (!rs->rs_on_net);
-        LASSERT (rs->rs_export == NULL);
-        LASSERT (cfs_list_empty(&rs->rs_obd_list));
-        LASSERT (cfs_list_empty(&rs->rs_exp_list));
+       LASSERT(!rs->rs_scheduled);
+       LASSERT(!rs->rs_scheduled_ever);
+       LASSERT(!rs->rs_handled);
+       LASSERT(!rs->rs_on_net);
+       LASSERT(rs->rs_export == NULL);
+       LASSERT(list_empty(&rs->rs_obd_list));
+       LASSERT(list_empty(&rs->rs_exp_list));
 
-        exp = class_export_get (req->rq_export);
+       exp = class_export_get(req->rq_export);
 
         /* disable reply scheduling while I'm setting up */
         rs->rs_scheduled = 1;
@@ -2462,13 +2459,13 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
               rs->rs_transno, exp->exp_last_committed);
        if (rs->rs_transno > exp->exp_last_committed) {
                /* not committed already */
-               cfs_list_add_tail(&rs->rs_obd_list,
+               list_add_tail(&rs->rs_obd_list,
                                  &exp->exp_uncommitted_replies);
        }
        spin_unlock(&exp->exp_uncommitted_replies_lock);
 
        spin_lock(&exp->exp_lock);
-       cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+       list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
        spin_unlock(&exp->exp_lock);
 
        netrc = target_send_reply_msg(req, rc, fail_id);
@@ -2490,12 +2487,12 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
        spin_lock(&rs->rs_lock);
        if (rs->rs_transno <= exp->exp_last_committed ||
            (!rs->rs_on_net && !rs->rs_no_ack) ||
-           cfs_list_empty(&rs->rs_exp_list) ||     /* completed already */
-           cfs_list_empty(&rs->rs_obd_list)) {
+           list_empty(&rs->rs_exp_list) ||     /* completed already */
+           list_empty(&rs->rs_obd_list)) {
                CDEBUG(D_HA, "Schedule reply immediately\n");
                ptlrpc_dispatch_difficult_reply(rs);
        } else {
-               cfs_list_add(&rs->rs_list, &svcpt->scp_rep_active);
+               list_add(&rs->rs_list, &svcpt->scp_rep_active);
                rs->rs_scheduled = 0;   /* allow notifier to schedule */
        }
        spin_unlock(&rs->rs_lock);
@@ -2597,12 +2594,12 @@ EXPORT_SYMBOL(ldlm_errno2error);
 void ldlm_dump_export_locks(struct obd_export *exp)
 {
        spin_lock(&exp->exp_locks_list_guard);
-       if (!cfs_list_empty(&exp->exp_locks_list)) {
+       if (!list_empty(&exp->exp_locks_list)) {
                struct ldlm_lock *lock;
 
                CERROR("dumping locks for export %p, "
                       "ignore if the unmount doesn't hang\n", exp);
-               cfs_list_for_each_entry(lock, &exp->exp_locks_list,
+               list_for_each_entry(lock, &exp->exp_locks_list,
                                        l_exp_refs_link)
                        LDLM_ERROR(lock, "lock:");
        }
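
Throughout these hunks the open-coded cfs_list_for_each() plus cfs_list_entry() pair collapses into list_for_each_entry(), which hides the container_of() arithmetic inside the iterator. Roughly, as defined in <linux/list.h>:

    /* pos: typed cursor, head: list head, member: name of embedded node */
    #define list_for_each_entry(pos, head, member)                          \
            for (pos = list_entry((head)->next, typeof(*pos), member);      \
                 &pos->member != (head);                                    \
                 pos = list_entry(pos->member.next, typeof(*pos), member))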
index b19b807..8d4a39a 100644 (file)
@@ -220,8 +220,8 @@ void ldlm_lock_put(struct ldlm_lock *lock)
 
                 res = lock->l_resource;
                LASSERT(ldlm_is_destroyed(lock));
-                LASSERT(cfs_list_empty(&lock->l_res_link));
-                LASSERT(cfs_list_empty(&lock->l_pending_chain));
+               LASSERT(list_empty(&lock->l_res_link));
+               LASSERT(list_empty(&lock->l_pending_chain));
 
                 lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
                                      LDLM_NSS_LOCKS);
@@ -251,11 +251,11 @@ EXPORT_SYMBOL(ldlm_lock_put);
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
 {
        int rc = 0;
-       if (!cfs_list_empty(&lock->l_lru)) {
+       if (!list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-               cfs_list_del_init(&lock->l_lru);
+               list_del_init(&lock->l_lru);
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
@@ -273,7 +273,7 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
 
        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
-               LASSERT(cfs_list_empty(&lock->l_lru));
+               LASSERT(list_empty(&lock->l_lru));
                RETURN(0);
        }
 
@@ -292,9 +292,9 @@ void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
        lock->l_last_used = cfs_time_current();
-       LASSERT(cfs_list_empty(&lock->l_lru));
+       LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-       cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+       list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        ldlm_clear_skipped(lock);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
@@ -325,13 +325,13 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
 
        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
-               LASSERT(cfs_list_empty(&lock->l_lru));
+               LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return;
        }
 
        spin_lock(&ns->ns_lock);
-       if (!cfs_list_empty(&lock->l_lru)) {
+       if (!list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
@@ -367,13 +367,13 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
                 LBUG();
         }
 
-        if (!cfs_list_empty(&lock->l_res_link)) {
+       if (!list_empty(&lock->l_res_link)) {
                 LDLM_ERROR(lock, "lock still on resource");
                 LBUG();
         }
 
        if (ldlm_is_destroyed(lock)) {
-               LASSERT(cfs_list_empty(&lock->l_lru));
+               LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
@@ -483,22 +483,22 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
        lu_ref_add(&resource->lr_reference, "lock", lock);
 
        atomic_set(&lock->l_refc, 2);
-       CFS_INIT_LIST_HEAD(&lock->l_res_link);
-       CFS_INIT_LIST_HEAD(&lock->l_lru);
-       CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
-       CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
-       CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
-       CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
+       INIT_LIST_HEAD(&lock->l_res_link);
+       INIT_LIST_HEAD(&lock->l_lru);
+       INIT_LIST_HEAD(&lock->l_pending_chain);
+       INIT_LIST_HEAD(&lock->l_bl_ast);
+       INIT_LIST_HEAD(&lock->l_cp_ast);
+       INIT_LIST_HEAD(&lock->l_rk_ast);
        init_waitqueue_head(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
-       CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
-       CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
-       CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
-       CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash);
+       INIT_LIST_HEAD(&lock->l_sl_mode);
+       INIT_LIST_HEAD(&lock->l_sl_policy);
+       INIT_HLIST_NODE(&lock->l_exp_hash);
+       INIT_HLIST_NODE(&lock->l_exp_flock_hash);
 
         lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                              LDLM_NSS_LOCKS);
-        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
+       INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, &lock_handle_ops);
 
         lu_ref_init(&lock->l_reference);
@@ -506,11 +506,11 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
         lock->l_callback_timeout = 0;
 
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
-        CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
+       INIT_LIST_HEAD(&lock->l_exp_refs_link);
         lock->l_exp_refs_nr = 0;
         lock->l_exp_refs_target = NULL;
 #endif
-        CFS_INIT_LIST_HEAD(&lock->l_exp_list);
+       INIT_LIST_HEAD(&lock->l_exp_list);
 
         RETURN(lock);
 }
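
The CFS_INIT_LIST_HEAD()/CFS_INIT_HLIST_NODE() calls map one-to-one onto the kernel's INIT_LIST_HEAD()/INIT_HLIST_NODE(). A minimal sketch of what the initialization buys, assuming a hypothetical container struct:

    #include <linux/list.h>

    struct foo {
            int                     value;
            struct list_head        link;   /* embedded node */
    };

    static void foo_init(struct foo *f)
    {
            f->value = 0;
            /* link.next and link.prev point back at link itself, so
             * list_empty(&f->link) holds until the node is added somewhere */
            INIT_LIST_HEAD(&f->link);
    }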
@@ -541,7 +541,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
         LASSERT(new_resid->name[0] != 0);
 
         /* This function assumes that the lock isn't on any lists */
-        LASSERT(cfs_list_empty(&lock->l_res_link));
+       LASSERT(list_empty(&lock->l_res_link));
 
         type = oldres->lr_type;
         unlock_res_and_lock(lock);
@@ -670,7 +670,7 @@ EXPORT_SYMBOL(ldlm_lock2desc);
  * Only add if we have not sent a blocking AST to the lock yet.
  */
 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                           cfs_list_t *work_list)
+                          struct list_head *work_list)
 {
        if (!ldlm_is_ast_sent(lock)) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
@@ -679,8 +679,8 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                 * discard dirty data, rather than writing back. */
                if (ldlm_is_ast_discard_data(new))
                        ldlm_set_discard_data(lock);
-                LASSERT(cfs_list_empty(&lock->l_bl_ast));
-                cfs_list_add(&lock->l_bl_ast, work_list);
+               LASSERT(list_empty(&lock->l_bl_ast));
+               list_add(&lock->l_bl_ast, work_list);
                 LDLM_LOCK_GET(lock);
                 LASSERT(lock->l_blocking_lock == NULL);
                 lock->l_blocking_lock = LDLM_LOCK_GET(new);
@@ -690,13 +690,13 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
 /**
  * Add a lock to list of just granted locks to send completion AST to.
  */
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
+void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
 {
        if (!ldlm_is_cp_reqd(lock)) {
                ldlm_set_cp_reqd(lock);
                 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
-                LASSERT(cfs_list_empty(&lock->l_cp_ast));
-                cfs_list_add(&lock->l_cp_ast, work_list);
+               LASSERT(list_empty(&lock->l_cp_ast));
+               list_add(&lock->l_cp_ast, work_list);
                 LDLM_LOCK_GET(lock);
         }
 }
@@ -708,7 +708,7 @@ void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
  * Must be called with lr_lock held.
  */
 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                            cfs_list_t *work_list)
+                           struct list_head *work_list)
 {
         ENTRY;
         check_res_locked(lock->l_resource);
@@ -936,9 +936,9 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
 
 struct sl_insert_point {
-        cfs_list_t *res_link;
-        cfs_list_t *mode_link;
-        cfs_list_t *policy_link;
+       struct list_head *res_link;
+       struct list_head *mode_link;
+       struct list_head *policy_link;
 };
 
 /**
@@ -955,18 +955,18 @@ struct sl_insert_point {
  * NOTE: called by
  *  - ldlm_grant_lock_with_skiplist
  */
-static void search_granted_lock(cfs_list_t *queue,
+static void search_granted_lock(struct list_head *queue,
                                 struct ldlm_lock *req,
                                 struct sl_insert_point *prev)
 {
-        cfs_list_t *tmp;
+       struct list_head *tmp;
         struct ldlm_lock *lock, *mode_end, *policy_end;
         ENTRY;
 
-        cfs_list_for_each(tmp, queue) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each(tmp, queue) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
-                mode_end = cfs_list_entry(lock->l_sl_mode.prev,
+               mode_end = list_entry(lock->l_sl_mode.prev,
                                           struct ldlm_lock, l_sl_mode);
 
                 if (lock->l_req_mode != req->l_req_mode) {
@@ -986,7 +986,7 @@ static void search_granted_lock(cfs_list_t *queue,
                 } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                         for (;;) {
                                 policy_end =
-                                        cfs_list_entry(lock->l_sl_policy.prev,
+                                       list_entry(lock->l_sl_policy.prev,
                                                        struct ldlm_lock,
                                                        l_sl_policy);
 
@@ -1010,7 +1010,7 @@ static void search_granted_lock(cfs_list_t *queue,
 
                                 /* go to next policy group within mode group */
                                 tmp = policy_end->l_res_link.next;
-                                lock = cfs_list_entry(tmp, struct ldlm_lock,
+                               lock = list_entry(tmp, struct ldlm_lock,
                                                       l_res_link);
                         }  /* loop over policy groups within the mode group */
 
@@ -1056,20 +1056,20 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                 return;
         }
 
-        LASSERT(cfs_list_empty(&lock->l_res_link));
-        LASSERT(cfs_list_empty(&lock->l_sl_mode));
-        LASSERT(cfs_list_empty(&lock->l_sl_policy));
+       LASSERT(list_empty(&lock->l_res_link));
+       LASSERT(list_empty(&lock->l_sl_mode));
+       LASSERT(list_empty(&lock->l_sl_policy));
 
        /*
         * lock->link == prev->link means lock starts a new group.
         * Don't re-add to itself to suppress kernel warnings.
         */
        if (&lock->l_res_link != prev->res_link)
-               cfs_list_add(&lock->l_res_link, prev->res_link);
+               list_add(&lock->l_res_link, prev->res_link);
        if (&lock->l_sl_mode != prev->mode_link)
-               cfs_list_add(&lock->l_sl_mode, prev->mode_link);
+               list_add(&lock->l_sl_mode, prev->mode_link);
        if (&lock->l_sl_policy != prev->policy_link)
-               cfs_list_add(&lock->l_sl_policy, prev->policy_link);
+               list_add(&lock->l_sl_policy, prev->policy_link);
 
         EXIT;
 }
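
search_granted_lock() leans on the circular property of struct list_head: for the first lock of a mode (or policy) group, l_sl_mode.prev is the embedded node of the group's last member, so a single list_entry() call yields the group tail in O(1). The idiom, as used above:

    /* last member of the mode group that 'lock' opens */
    mode_end = list_entry(lock->l_sl_mode.prev,
                          struct ldlm_lock, l_sl_mode);

The guarded list_add() calls in ldlm_granted_list_add_lock() then avoid linking a node after itself, which would corrupt the list and trip the kernel's list-debugging warnings.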
@@ -1101,7 +1101,7 @@ static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
  *
  * must be called with lr_lock held
  */
-void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
         ENTRY;
@@ -1144,19 +1144,19 @@ void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
  * \retval a referenced lock or NULL.  See the flag descriptions below, in the
  * comment above ldlm_lock_match
  */
-static struct ldlm_lock *search_queue(cfs_list_t *queue,
+static struct ldlm_lock *search_queue(struct list_head *queue,
                                       ldlm_mode_t *mode,
                                       ldlm_policy_data_t *policy,
                                       struct ldlm_lock *old_lock,
                                      __u64 flags, int unref)
 {
         struct ldlm_lock *lock;
-        cfs_list_t       *tmp;
+       struct list_head       *tmp;
 
-        cfs_list_for_each(tmp, queue) {
+       list_for_each(tmp, queue) {
                 ldlm_mode_t match;
 
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (lock == old_lock)
                         break;
@@ -1685,7 +1685,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                         GOTO(out, rc = -ENOMEM);
                 }
 
-                CFS_INIT_LIST_HEAD(&node->li_group);
+               INIT_LIST_HEAD(&node->li_group);
                 ldlm_interval_attach(node, lock);
                 node = NULL;
         }
@@ -1758,10 +1758,10 @@ out:
  *
  * Must be called with resource lock held.
  */
-int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
-                         cfs_list_t *work_list)
+int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
+                        struct list_head *work_list)
 {
-        cfs_list_t *tmp, *pos;
+       struct list_head *tmp, *pos;
         ldlm_processing_policy policy;
        __u64 flags;
         int rc = LDLM_ITER_CONTINUE;
@@ -1773,9 +1773,9 @@ int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
         policy = ldlm_processing_policy_table[res->lr_type];
         LASSERT(policy);
 
-        cfs_list_for_each_safe(tmp, pos, queue) {
+       list_for_each_safe(tmp, pos, queue) {
                 struct ldlm_lock *pending;
-                pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+               pending = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
 
@@ -1801,14 +1801,14 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        struct ldlm_lock       *lock;
        ENTRY;
 
-       if (cfs_list_empty(arg->list))
+       if (list_empty(arg->list))
                RETURN(-ENOENT);
 
-       lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
+       lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
 
        /* nobody should touch l_bl_ast */
        lock_res_and_lock(lock);
-       cfs_list_del_init(&lock->l_bl_ast);
+       list_del_init(&lock->l_bl_ast);
 
        LASSERT(ldlm_is_ast_sent(lock));
        LASSERT(lock->l_bl_ast_run == 0);
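
Each ldlm_work_*_ast_lock() callback detaches the first work item with the same three-step pattern; a minimal sketch, assuming a work list threaded through l_bl_ast:

    if (list_empty(work_list))
            return -ENOENT;
    lock = list_entry(work_list->next, struct ldlm_lock, l_bl_ast);
    list_del_init(&lock->l_bl_ast);  /* re-init so list_empty() holds again */

list_first_entry(work_list, struct ldlm_lock, l_bl_ast) is the equivalent modern spelling of the list_entry() call on ->next.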
@@ -1838,10 +1838,10 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        ldlm_completion_callback completion_callback;
        ENTRY;
 
-       if (cfs_list_empty(arg->list))
+       if (list_empty(arg->list))
                RETURN(-ENOENT);
 
-       lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
+       lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
 
        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
@@ -1856,7 +1856,7 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
 
        /* nobody should touch l_cp_ast */
        lock_res_and_lock(lock);
-       cfs_list_del_init(&lock->l_cp_ast);
+       list_del_init(&lock->l_cp_ast);
        LASSERT(ldlm_is_cp_reqd(lock));
        /* save l_completion_ast since it can be changed by
         * mds_intent_policy(), see bug 14225 */
@@ -1883,11 +1883,11 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        struct ldlm_lock       *lock;
        ENTRY;
 
-       if (cfs_list_empty(arg->list))
+       if (list_empty(arg->list))
                RETURN(-ENOENT);
 
-       lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
-       cfs_list_del_init(&lock->l_rk_ast);
+       lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
+       list_del_init(&lock->l_rk_ast);
 
        /* the desc just pretends to be exclusive */
        ldlm_lock2desc(lock, &desc);
@@ -1911,12 +1911,12 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        int                              rc = 0;
        ENTRY;
 
-       if (cfs_list_empty(arg->list))
+       if (list_empty(arg->list))
                RETURN(-ENOENT);
 
-       gl_work = cfs_list_entry(arg->list->next, struct ldlm_glimpse_work,
+       gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
                                 gl_list);
-       cfs_list_del_init(&gl_work->gl_list);
+       list_del_init(&gl_work->gl_list);
 
        lock = gl_work->gl_lock;
 
@@ -1941,14 +1941,14 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
  * Used on server to send multiple ASTs together instead of sending one by
  * one.
  */
-int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
                       ldlm_desc_ast_t ast_type)
 {
        struct ldlm_cb_set_arg *arg;
        set_producer_func       work_ast_lock;
        int                     rc;
 
-       if (cfs_list_empty(rpc_list))
+       if (list_empty(rpc_list))
                RETURN(0);
 
        OBD_ALLOC_PTR(arg);
@@ -2005,7 +2005,7 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
 }
 
 static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                              cfs_hlist_node_t *hnode, void *arg)
+                             struct hlist_node *hnode, void *arg)
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
         int    rc;
@@ -2041,11 +2041,12 @@ EXPORT_SYMBOL(ldlm_reprocess_all_ns);
  */
 void ldlm_reprocess_all(struct ldlm_resource *res)
 {
-        CFS_LIST_HEAD(rpc_list);
-
+       struct list_head rpc_list;
 #ifdef HAVE_SERVER_SUPPORT
         int rc;
         ENTRY;
+
+       INIT_LIST_HEAD(&rpc_list);
         /* Local lock trees don't get reprocessed. */
         if (ns_is_client(ldlm_res_to_ns(res))) {
                 EXIT;
@@ -2062,11 +2063,13 @@ restart:
         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
                                LDLM_WORK_CP_AST);
         if (rc == -ERESTART) {
-                LASSERT(cfs_list_empty(&rpc_list));
+               LASSERT(list_empty(&rpc_list));
                 goto restart;
         }
 #else
         ENTRY;
+
+       INIT_LIST_HEAD(&rpc_list);
         if (!ns_is_client(ldlm_res_to_ns(res))) {
                 CERROR("This is client-side-only module, cannot handle "
                        "LDLM_NAMESPACE_SERVER resource type lock.\n");
@@ -2107,8 +2110,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
             req->l_resource->lr_type != LDLM_IBITS)
                 return;
 
-        cfs_list_del_init(&req->l_sl_policy);
-        cfs_list_del_init(&req->l_sl_mode);
+       list_del_init(&req->l_sl_policy);
+       list_del_init(&req->l_sl_mode);
 }
 
 /**
@@ -2188,7 +2191,7 @@ struct export_cl_data {
  * Cancels passed locks.
  */
 int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                    cfs_hlist_node_t *hnode, void *data)
+                                   struct hlist_node *hnode, void *data)
 
 {
        struct export_cl_data   *ecl = (struct export_cl_data *)data;
@@ -2278,7 +2281,7 @@ EXPORT_SYMBOL(ldlm_lock_downgrade);
 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags)
 {
-       CFS_LIST_HEAD(rpc_list);
+       struct list_head rpc_list;
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
@@ -2289,6 +2292,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
        struct ldlm_interval *node;
        ENTRY;
 
+       INIT_LIST_HEAD(&rpc_list);
        /* Just return if mode is unchanged. */
        if (new_mode == lock->l_granted_mode) {
                *flags |= LDLM_FL_BLOCK_GRANTED;
@@ -2329,7 +2333,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                         /* FIXME: ugly code, I have to attach the lock to an
                          * interval node again since perhaps it will be granted
                          * soon */
-                        CFS_INIT_LIST_HEAD(&node->li_group);
+                       INIT_LIST_HEAD(&node->li_group);
                         ldlm_interval_attach(node, lock);
                         node = NULL;
                 }
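
Here and in ldlm_reprocess_all() above, the declare-and-initialize CFS_LIST_HEAD() macro gives way to a plain declaration plus an explicit INIT_LIST_HEAD(), presumably so a single declaration can serve both #ifdef HAVE_SERVER_SUPPORT branches. The kernel offers both forms:

    /* either: declare and initialize in one step */
    LIST_HEAD(rpc_list);

    /* or: declare first, initialize before first use */
    struct list_head rpc_list;
    INIT_LIST_HEAD(&rpc_list);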
index fb7fc3b..9562229 100644 (file)
@@ -100,13 +100,13 @@ struct ldlm_bl_pool {
         * as a priority. It is used for LDLM_FL_DISCARD_DATA requests;
         * see bug 13843
         */
-       cfs_list_t              blp_prio_list;
+       struct list_head              blp_prio_list;
 
        /*
         * blp_list is used for all other callbacks which are likely
         * to take longer to process.
         */
-       cfs_list_t              blp_list;
+       struct list_head              blp_list;
 
        wait_queue_head_t       blp_waitq;
        struct completion       blp_comp;
@@ -117,11 +117,11 @@ struct ldlm_bl_pool {
 };
 
 struct ldlm_bl_work_item {
-        cfs_list_t              blwi_entry;
+       struct list_head              blwi_entry;
         struct ldlm_namespace  *blwi_ns;
         struct ldlm_lock_desc   blwi_ld;
         struct ldlm_lock       *blwi_lock;
-        cfs_list_t              blwi_head;
+       struct list_head              blwi_head;
         int                     blwi_count;
        struct completion        blwi_comp;
        ldlm_cancel_flags_t     blwi_flags;
@@ -146,14 +146,14 @@ static spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
  *
  * All access to it should be under waiting_locks_spinlock.
  */
-static cfs_list_t waiting_locks_list;
+static struct list_head waiting_locks_list;
 static struct timer_list waiting_locks_timer;
 
 static struct expired_lock_thread {
        wait_queue_head_t       elt_waitq;
        int                     elt_state;
        int                     elt_dump;
-       cfs_list_t              elt_expired_locks;
+       struct list_head                elt_expired_locks;
 } expired_lock_thread;
 
 static inline int have_expired_locks(void)
@@ -162,7 +162,7 @@ static inline int have_expired_locks(void)
 
        ENTRY;
        spin_lock_bh(&waiting_locks_spinlock);
-       need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
+       need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&waiting_locks_spinlock);
 
        RETURN(need_to_run);
@@ -173,7 +173,7 @@ static inline int have_expired_locks(void)
  */
 static int expired_lock_main(void *arg)
 {
-       cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
+       struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        int do_dump;
 
@@ -206,11 +206,11 @@ static int expired_lock_main(void *arg)
 
                do_dump = 0;
 
-               while (!cfs_list_empty(expired)) {
+               while (!list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;
 
-                       lock = cfs_list_entry(expired->next, struct ldlm_lock,
+                       lock = list_entry(expired->next, struct ldlm_lock,
                                          l_pending_chain);
                        if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE &&
                            (void *)lock >= LP_POISON) {
@@ -218,7 +218,7 @@ static int expired_lock_main(void *arg)
                                CERROR("free lock on elt list %p\n", lock);
                                LBUG();
                        }
-                       cfs_list_del_init(&lock->l_pending_chain);
+                       list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export <
                             LP_POISON + PAGE_CACHE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
@@ -285,7 +285,7 @@ static int ldlm_lock_busy(struct ldlm_lock *lock)
                return 0;
 
        spin_lock_bh(&lock->l_export->exp_rpc_lock);
-       cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
+       list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                if (req->rq_ops->hpreq_lock_match) {
                        match = req->rq_ops->hpreq_lock_match(req, lock);
@@ -304,8 +304,8 @@ static void waiting_locks_callback(unsigned long unused)
        int                     need_dump = 0;
 
        spin_lock_bh(&waiting_locks_spinlock);
-        while (!cfs_list_empty(&waiting_locks_list)) {
-                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+       while (!list_empty(&waiting_locks_list)) {
+               lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                       l_pending_chain);
                 if (cfs_time_after(lock->l_callback_timeout,
                                    cfs_time_current()) ||
@@ -346,13 +346,13 @@ static void waiting_locks_callback(unsigned long unused)
                /* no need to take an extra ref on the lock since it was in
                  * the waiting_locks_list and ldlm_add_waiting_lock()
                  * already grabbed a ref */
-                cfs_list_del(&lock->l_pending_chain);
-                cfs_list_add(&lock->l_pending_chain,
+               list_del(&lock->l_pending_chain);
+               list_add(&lock->l_pending_chain,
                              &expired_lock_thread.elt_expired_locks);
                need_dump = 1;
        }
 
-       if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
+       if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
                if (obd_dump_on_timeout && need_dump)
                        expired_lock_thread.elt_dump = __LINE__;
 
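
The list_del()/list_add() pair that moves an expired lock onto elt_expired_locks could equally be written with the combined helper; a sketch:

    /* unlink from waiting_locks_list and relink at the head of the
     * expired list in one call */
    list_move(&lock->l_pending_chain,
              &expired_lock_thread.elt_expired_locks);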
@@ -363,9 +363,9 @@ static void waiting_locks_callback(unsigned long unused)
          * Make sure the timer will fire again if we have any locks
          * left.
          */
-        if (!cfs_list_empty(&waiting_locks_list)) {
+       if (!list_empty(&waiting_locks_list)) {
                 cfs_time_t timeout_rounded;
-                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+               lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                       l_pending_chain);
                 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
@@ -390,7 +390,7 @@ static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
         cfs_time_t timeout;
         cfs_time_t timeout_rounded;
 
-        if (!cfs_list_empty(&lock->l_pending_chain))
+       if (!list_empty(&lock->l_pending_chain))
                 return 0;
 
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
@@ -411,7 +411,7 @@ static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
         /* if the new lock has a shorter timeout than something earlier on
            the list, we'll wait the longer amount of time; no big deal. */
         /* FIFO */
-        cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
+       list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
         return 1;
 }
 
@@ -448,8 +448,8 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
 
        if (ret) {
                spin_lock_bh(&lock->l_export->exp_bl_list_lock);
-               if (cfs_list_empty(&lock->l_exp_list))
-                       cfs_list_add(&lock->l_exp_list,
+               if (list_empty(&lock->l_exp_list))
+                       list_add(&lock->l_exp_list,
                                     &lock->l_export->exp_bl_list);
                spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
        }
@@ -471,9 +471,9 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
  */
 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
 {
-        cfs_list_t *list_next;
+       struct list_head *list_next;
 
-        if (cfs_list_empty(&lock->l_pending_chain))
+       if (list_empty(&lock->l_pending_chain))
                 return 0;
 
         list_next = lock->l_pending_chain.next;
@@ -484,13 +484,13 @@ static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
                         cfs_timer_disarm(&waiting_locks_timer);
                 } else {
                         struct ldlm_lock *next;
-                        next = cfs_list_entry(list_next, struct ldlm_lock,
+                       next = list_entry(list_next, struct ldlm_lock,
                                               l_pending_chain);
                         cfs_timer_arm(&waiting_locks_timer,
                                       round_timeout(next->l_callback_timeout));
                 }
         }
-        cfs_list_del_init(&lock->l_pending_chain);
+       list_del_init(&lock->l_pending_chain);
 
         return 1;
 }
@@ -511,7 +511,7 @@ int ldlm_del_waiting_lock(struct ldlm_lock *lock)
 
        /* remove the lock out of export blocking list */
        spin_lock_bh(&lock->l_export->exp_bl_list_lock);
-       cfs_list_del_init(&lock->l_exp_list);
+       list_del_init(&lock->l_exp_list);
        spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
 
         if (ret) {
@@ -540,7 +540,7 @@ int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
 
        spin_lock_bh(&waiting_locks_spinlock);
 
-       if (cfs_list_empty(&lock->l_pending_chain)) {
+       if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
@@ -600,7 +600,7 @@ static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                /* the lock was not in any list, grab an extra ref before adding
                 * the lock to the expired list */
                LDLM_LOCK_GET(lock);
-       cfs_list_add(&lock->l_pending_chain,
+       list_add(&lock->l_pending_chain,
                     &expired_lock_thread.elt_expired_locks);
        wake_up(&expired_lock_thread.elt_waitq);
        spin_unlock_bh(&waiting_locks_spinlock);
@@ -758,8 +758,8 @@ static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
        }
 
        spin_lock_bh(&lock->l_export->exp_rpc_lock);
-       cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
-                               rq_exp_list) {
+       list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
+                           rq_exp_list) {
                /* Do not process requests that were not yet added to their
                 * incoming queue or were already removed from it for
                 * processing. We evaluate ptlrpc_nrs_req_can_move() without
@@ -1094,7 +1094,8 @@ int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
 }
 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
 
-int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list)
+int ldlm_glimpse_locks(struct ldlm_resource *res,
+                      struct list_head *gl_work_list)
 {
        int     rc;
        ENTRY;
@@ -1733,13 +1734,14 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                     struct ldlm_request *dlm_req,
                                     struct ldlm_lock *lock)
 {
+       struct list_head ast_list;
        int lvb_len;
-       CFS_LIST_HEAD(ast_list);
        int rc = 0;
        ENTRY;
 
        LDLM_DEBUG(lock, "client completion callback handler START");
 
+       INIT_LIST_HEAD(&ast_list);
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
                int to = cfs_time_seconds(1);
                while (to > 0) {
@@ -1938,10 +1940,10 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
        if (blwi->blwi_lock &&
            ldlm_is_discard_data(blwi->blwi_lock)) {
                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
-               cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+               list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
        } else {
                /* other blocking callbacks are added to the regular list */
-               cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+               list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        }
        spin_unlock(&blp->blp_lock);
 
@@ -1958,12 +1960,12 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
                             struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld,
-                            cfs_list_t *cancels, int count,
+                            struct list_head *cancels, int count,
                             struct ldlm_lock *lock,
                             ldlm_cancel_flags_t cancel_flags)
 {
        init_completion(&blwi->blwi_comp);
-        CFS_INIT_LIST_HEAD(&blwi->blwi_head);
+       INIT_LIST_HEAD(&blwi->blwi_head);
 
        if (memory_pressure_get())
                 blwi->blwi_mem_pressure = 1;
@@ -1973,8 +1975,8 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
         if (ld != NULL)
                 blwi->blwi_ld = *ld;
         if (count) {
-                cfs_list_add(&blwi->blwi_head, cancels);
-                cfs_list_del_init(cancels);
+               list_add(&blwi->blwi_head, cancels);
+               list_del_init(cancels);
                 blwi->blwi_count = count;
         } else {
                 blwi->blwi_lock = lock;
@@ -1993,7 +1995,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld,
                             struct ldlm_lock *lock,
-                            cfs_list_t *cancels, int count,
+                            struct list_head *cancels, int count,
                             ldlm_cancel_flags_t cancel_flags)
 {
        ENTRY;
@@ -2035,7 +2037,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
 }
 
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                          cfs_list_t *cancels, int count,
+                          struct list_head *cancels, int count,
                           ldlm_cancel_flags_t cancel_flags)
 {
 #ifdef __KERNEL__
@@ -2463,10 +2465,10 @@ static int ldlm_hpreq_handler(struct ptlrpc_request *req)
 }
 
 int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                        cfs_hlist_node_t *hnode, void *data)
+                       struct hlist_node *hnode, void *data)
 
 {
-        cfs_list_t         *rpc_list = data;
+       struct list_head         *rpc_list = data;
         struct ldlm_lock   *lock = cfs_hash_object(hs, hnode);
 
         lock_res_and_lock(lock);
@@ -2502,7 +2504,7 @@ int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                             &lock->l_remote_handle, &lock->l_exp_hash);
        }
 
-        cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
+       list_add_tail(&lock->l_rk_ast, rpc_list);
         LDLM_LOCK_GET(lock);
 
         unlock_res_and_lock(lock);
@@ -2511,10 +2513,10 @@ int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 
 void ldlm_revoke_export_locks(struct obd_export *exp)
 {
-        cfs_list_t  rpc_list;
+       struct list_head  rpc_list;
         ENTRY;
 
-        CFS_INIT_LIST_HEAD(&rpc_list);
+       INIT_LIST_HEAD(&rpc_list);
         cfs_hash_for_each_empty(exp->exp_lock_hash,
                                 ldlm_revoke_lock_cb, &rpc_list);
         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
@@ -2533,20 +2535,20 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
 
        spin_lock(&blp->blp_lock);
        /* process a request from the blp_list at least every blp_num_threads */
-       if (!cfs_list_empty(&blp->blp_list) &&
-           (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
-               blwi = cfs_list_entry(blp->blp_list.next,
-                                     struct ldlm_bl_work_item, blwi_entry);
+       if (!list_empty(&blp->blp_list) &&
+           (list_empty(&blp->blp_prio_list) || num_bl == 0))
+               blwi = list_entry(blp->blp_list.next,
+                                 struct ldlm_bl_work_item, blwi_entry);
        else
-               if (!cfs_list_empty(&blp->blp_prio_list))
-                       blwi = cfs_list_entry(blp->blp_prio_list.next,
-                                             struct ldlm_bl_work_item,
-                                             blwi_entry);
+               if (!list_empty(&blp->blp_prio_list))
+                       blwi = list_entry(blp->blp_prio_list.next,
+                                         struct ldlm_bl_work_item,
+                                         blwi_entry);
 
        if (blwi) {
                if (++num_bl >= atomic_read(&blp->blp_num_threads))
                        num_bl = 0;
-               cfs_list_del(&blwi->blwi_entry);
+               list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);
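
ldlm_bl_get_work() polls two queues, taking from blp_list at least once every blp_num_threads picks so that regular callbacks are not starved by the priority list. On newer kernels the list_empty()-plus-list_entry() pairs could be folded into list_first_entry_or_null(); a sketch:

    /* NULL when the list is empty, otherwise its first entry */
    blwi = list_first_entry_or_null(&blp->blp_list,
                                    struct ldlm_bl_work_item, blwi_entry);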
 
@@ -2717,50 +2719,50 @@ ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
 }
 
 static void *
-ldlm_export_lock_key(cfs_hlist_node_t *hnode)
+ldlm_export_lock_key(struct hlist_node *hnode)
 {
         struct ldlm_lock *lock;
 
-        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         return &lock->l_remote_handle;
 }
 
 static void
-ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
+ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
 {
         struct ldlm_lock     *lock;
 
-        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         lock->l_remote_handle = *(struct lustre_handle *)key;
 }
 
 static int
-ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
 {
         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
 }
 
 static void *
-ldlm_export_lock_object(cfs_hlist_node_t *hnode)
+ldlm_export_lock_object(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+       return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
 }
 
 static void
-ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct ldlm_lock *lock;
 
-        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         LDLM_LOCK_GET(lock);
 }
 
 static void
-ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct ldlm_lock *lock;
 
-        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         LDLM_LOCK_RELEASE(lock);
 }
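
The cfs_hash callbacks convert the same way: cfs_hlist_node_t becomes struct hlist_node and cfs_hlist_entry() becomes hlist_entry(), which is nothing more than container_of():

    /* as in <linux/list.h> */
    #define hlist_entry(ptr, type, member) container_of(ptr, type, member)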
 
@@ -2930,8 +2932,8 @@ static int ldlm_setup(void)
        ldlm_state->ldlm_bl_pool = blp;
 
        spin_lock_init(&blp->blp_lock);
-       CFS_INIT_LIST_HEAD(&blp->blp_list);
-       CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
+       INIT_LIST_HEAD(&blp->blp_list);
+       INIT_LIST_HEAD(&blp->blp_prio_list);
        init_waitqueue_head(&blp->blp_waitq);
        atomic_set(&blp->blp_num_threads, 0);
        atomic_set(&blp->blp_busy_threads, 0);
@@ -2953,11 +2955,11 @@ static int ldlm_setup(void)
        }
 
 # ifdef HAVE_SERVER_SUPPORT
-       CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
+       INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);
 
-       CFS_INIT_LIST_HEAD(&waiting_locks_list);
+       INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
 
@@ -2989,8 +2991,8 @@ static int ldlm_cleanup(void)
 {
         ENTRY;
 
-        if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
-            !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
+       if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+           !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
                 CERROR("ldlm still has namespaces; clean these up first.\n");
                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
@@ -3009,7 +3011,7 @@ static int ldlm_cleanup(void)
                        init_completion(&blp->blp_comp);
 
                        spin_lock(&blp->blp_lock);
-                       cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
+                       list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                        wake_up(&blp->blp_waitq);
                        spin_unlock(&blp->blp_lock);
 
@@ -3051,6 +3053,11 @@ int ldlm_init(void)
        mutex_init(&ldlm_ref_mutex);
        mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
        mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+
+       INIT_LIST_HEAD(&ldlm_srv_namespace_list);
+       INIT_LIST_HEAD(&ldlm_cli_active_namespace_list);
+       INIT_LIST_HEAD(&ldlm_cli_inactive_namespace_list);
+
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
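
The three namespace lists now get runtime initialization in ldlm_init(). A file-scope list private to one file could instead be defined pre-initialized, e.g.:

    /* sketch: compile-time initialization of a static list head */
    static LIST_HEAD(my_list);

Explicit INIT_LIST_HEAD() at setup time works regardless of where the lists are actually defined, presumably why it is used here.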
index f6a37f0..14a76b6 100644 (file)
  * \retval 1 if the lock is compatible to all locks in \a queue
  */
 static inline int
-ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
-                        cfs_list_t *work_list)
+ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+                       struct list_head *work_list)
 {
-        cfs_list_t *tmp;
-        struct ldlm_lock *lock;
-        ldlm_mode_t req_mode = req->l_req_mode;
-        int compat = 1;
-        ENTRY;
+       ldlm_mode_t req_mode = req->l_req_mode;
+       struct ldlm_lock *lock;
+       struct list_head *tmp;
+       int compat = 1;
+       ENTRY;
 
-        lockmode_verify(req_mode);
-
-        cfs_list_for_each(tmp, queue) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       lockmode_verify(req_mode);
 
+       list_for_each_entry(lock, queue, l_res_link) {
                /* We stop walking the queue if we hit ourselves so we don't
                 * take conflicting locks enqueued after us into account,
                 * or we'd wait forever. */
@@ -94,9 +92,8 @@ ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                        RETURN(compat);
 
                /* Advance loop cursor to last lock of mode group. */
-               tmp = &cfs_list_entry(lock->l_sl_mode.prev,
-                                     struct ldlm_lock,
-                                     l_sl_mode)->l_res_link;
+               tmp = &list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
+                                 l_sl_mode)->l_res_link;
 
                if (lockmode_compat(lock->l_req_mode, req_mode))
                         continue;
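
The "advance loop cursor" assignment uses the same group-tail trick as the skiplist code: l_sl_mode.prev of a group's first lock is its last lock, and that lock's l_res_link is where the outer queue walk should resume. With list_for_each_entry() the cursor is the typed pointer itself, so a sketch of skipping the remainder of a group is:

    /* reposition 'lock' at its group's last member; the iterator's next
     * step then leaves the group entirely */
    lock = list_entry(lock->l_sl_mode.prev, struct ldlm_lock, l_sl_mode);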
@@ -112,10 +109,10 @@ ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                         ldlm_add_ast_work_item(lock, req, work_list);
 
                 {
-                        cfs_list_t *head;
+                       struct list_head *head;
 
                         head = &lock->l_sl_mode;
-                        cfs_list_for_each_entry(lock, head, l_sl_mode)
+                       list_for_each_entry(lock, head, l_sl_mode)
                                 if (lock->l_blocking_ast)
                                         ldlm_add_ast_work_item(lock, req,
                                                                work_list);
@@ -142,16 +139,17 @@ ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
  */
 int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
                            int first_enq, ldlm_error_t *err,
-                           cfs_list_t *work_list)
+                           struct list_head *work_list)
 {
-        struct ldlm_resource *res = lock->l_resource;
-        CFS_LIST_HEAD(rpc_list);
-        int rc;
-        ENTRY;
+       struct ldlm_resource *res = lock->l_resource;
+       struct list_head rpc_list;
+       int rc;
+       ENTRY;
 
        LASSERT(lock->l_granted_mode != lock->l_req_mode);
-        check_res_locked(res);
-        LASSERT(cfs_list_empty(&res->lr_converting));
+       check_res_locked(res);
+       LASSERT(list_empty(&res->lr_converting));
+       INIT_LIST_HEAD(&rpc_list);
 
         if (!first_enq) {
                 LASSERT(work_list != NULL);
@@ -178,7 +176,7 @@ int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (cfs_list_empty(&lock->l_res_link))
+               if (list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
index 522f947..295cf0e 100644 (file)
@@ -1241,19 +1241,19 @@ int ldlm_pools_recalc(ldlm_side_t client)
         int nr, equal = 0;
        int time = 50; /* seconds of sleep if no active namespaces */
 
-        /*
-         * No need to setup pool limit for client pools.
-         */
-        if (client == LDLM_NAMESPACE_SERVER) {
-                /*
-                 * Check all modest namespaces first.
-                 */
+       /*
+        * No need to setup pool limit for client pools.
+        */
+       if (client == LDLM_NAMESPACE_SERVER) {
+               /*
+                * Check all modest namespaces first.
+                */
                mutex_lock(ldlm_namespace_lock(client));
-                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
-                                        ns_list_chain)
-                {
-                        if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
-                                continue;
+               list_for_each_entry(ns, ldlm_namespace_list(client),
+                                   ns_list_chain)
+               {
+                       if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
+                               continue;
 
                         l = ldlm_pool_granted(&ns->ns_pool);
                         if (l == 0)
@@ -1281,14 +1281,14 @@ int ldlm_pools_recalc(ldlm_side_t client)
                         equal = 1;
                 }
 
-                /*
-                 * The rest is given to greedy namespaces.
-                 */
-                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
-                                        ns_list_chain)
-                {
-                        if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
-                                continue;
+               /*
+                * The rest is given to greedy namespaces.
+                */
+               list_for_each_entry(ns, ldlm_namespace_list(client),
+                                   ns_list_chain)
+               {
+                       if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
+                               continue;
 
                         if (equal) {
                                 /*
@@ -1325,7 +1325,7 @@ int ldlm_pools_recalc(ldlm_side_t client)
                  * locks synchronously.
                  */
                mutex_lock(ldlm_namespace_lock(client));
-               if (cfs_list_empty(ldlm_namespace_list(client))) {
+               if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
index 19a3595..079edc8 100644 (file)
@@ -831,7 +831,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
 EXPORT_SYMBOL(ldlm_prep_elc_req);
 
 int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
-                          cfs_list_t *cancels, int count)
+                         struct list_head *cancels, int count)
 {
         return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                  LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
@@ -1157,7 +1157,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
  * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
  */
 static void ldlm_cancel_pack(struct ptlrpc_request *req,
-                             cfs_list_t *head, int count)
+                            struct list_head *head, int count)
 {
         struct ldlm_request *dlm;
         struct ldlm_lock *lock;
@@ -1177,7 +1177,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
         /* XXX: it would be better to pack lock handles grouped by resource,
          * so that the server cancel would call filter_lvbo_update() less
          * frequently. */
-        cfs_list_for_each_entry(lock, head, l_bl_ast) {
+       list_for_each_entry(lock, head, l_bl_ast) {
                 if (!count--)
                         break;
                 LASSERT(lock->l_conn_export);
@@ -1193,7 +1193,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
 /**
  * Prepare and send a batched cancel RPC. It will include \a count lock
  * handles of locks given in \a cancels list. */
-int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
                         int count, ldlm_cancel_flags_t flags)
 {
         struct ptlrpc_request *req = NULL;
@@ -1366,8 +1366,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
        /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
         * RPC which goes to canceld portal, so we can cancel other LRU locks
         * here and send them all as one LDLM_CANCEL RPC. */
-        LASSERT(cfs_list_empty(&lock->l_bl_ast));
-        cfs_list_add(&lock->l_bl_ast, &cancels);
+       LASSERT(list_empty(&lock->l_bl_ast));
+       list_add(&lock->l_bl_ast, &cancels);
 
         exp = lock->l_conn_export;
         if (exp_connect_cancelset(exp)) {
@@ -1391,7 +1391,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel);
  * Locally cancel up to \a count locks in list \a cancels.
  * Return the number of cancelled locks.
  */
-int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
                               ldlm_cancel_flags_t flags)
 {
        struct list_head head = LIST_HEAD_INIT(head);
@@ -1400,7 +1400,7 @@ int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
        __u64 rc;
 
         left = count;
-        cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+       list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                 if (left-- == 0)
                         break;
 
@@ -1416,14 +1416,14 @@ int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
                 * the one being generated now. */
                if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                        LDLM_DEBUG(lock, "Cancel lock separately");
-                       cfs_list_del_init(&lock->l_bl_ast);
-                       cfs_list_add(&lock->l_bl_ast, &head);
+                       list_del_init(&lock->l_bl_ast);
+                       list_add(&lock->l_bl_ast, &head);
                        bl_ast++;
                         continue;
                 }
                 if (rc == LDLM_FL_LOCAL_ONLY) {
                         /* CANCEL RPC should not be sent to server. */
-                        cfs_list_del_init(&lock->l_bl_ast);
+                       list_del_init(&lock->l_bl_ast);
                         LDLM_LOCK_RELEASE(lock);
                         count--;
                 }
@@ -1639,8 +1639,9 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
  *                               sending any RPCs or waiting for any
  *                               outstanding RPC to complete.
  */
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
-                                 int count, int max, int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
+                                struct list_head *cancels, int count, int max,
+                                int flags)
 {
        ldlm_cancel_lru_policy_t pf;
        struct ldlm_lock *lock, *next;
@@ -1657,7 +1658,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
         pf = ldlm_cancel_lru_policy(ns, flags);
         LASSERT(pf != NULL);
 
-        while (!cfs_list_empty(&ns->ns_unused_list)) {
+       while (!list_empty(&ns->ns_unused_list)) {
                 ldlm_policy_res_t result;
 
                 /* all unused locks */
@@ -1668,7 +1669,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 if (max && added >= max)
                         break;
 
-                cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+               list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
                                             l_lru) {
                         /* No locks which got blocking requests. */
                        LASSERT(!ldlm_is_bl_ast(lock));
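
The LRU walk uses the _safe iterator because candidate locks are unlinked from ns_unused_list while the walk is in progress; the variant samples the next entry before the body runs, so deleting the current one is harmless. A minimal sketch with a hypothetical should_cancel() predicate:

    struct ldlm_lock *lock, *next;

    list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
            if (should_cancel(lock))        /* hypothetical predicate */
                    list_del_init(&lock->l_lru);  /* 'next' already sampled */
    }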
@@ -1758,8 +1759,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 * and can't use l_pending_chain as it is used both on
                 * server and client; nevertheless bug 5666 says it is
                 * used only on server */
-               LASSERT(cfs_list_empty(&lock->l_bl_ast));
-               cfs_list_add(&lock->l_bl_ast, cancels);
+               LASSERT(list_empty(&lock->l_bl_ast));
+               list_add(&lock->l_bl_ast, cancels);
                unlock_res_and_lock(lock);
                lu_ref_del(&lock->l_reference, __FUNCTION__, current);
                spin_lock(&ns->ns_lock);
@@ -1770,7 +1771,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
        RETURN(added);
 }
 
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                           int count, int max, ldlm_cancel_flags_t cancel_flags,
                           int flags)
 {
@@ -1816,7 +1817,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
  * list.
  */
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
-                              cfs_list_t *cancels,
+                              struct list_head *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, __u64 lock_flags,
                               ldlm_cancel_flags_t cancel_flags, void *opaque)
@@ -1826,7 +1827,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
         ENTRY;
 
         lock_res(res);
-        cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+       list_for_each_entry(lock, &res->lr_granted, l_res_link) {
                 if (opaque != NULL && lock->l_ast_data != opaque) {
                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                    lock->l_ast_data, opaque);
@@ -1856,8 +1857,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
                                 lock_flags;
 
-                LASSERT(cfs_list_empty(&lock->l_bl_ast));
-                cfs_list_add(&lock->l_bl_ast, cancels);
+               LASSERT(list_empty(&lock->l_bl_ast));
+               list_add(&lock->l_bl_ast, cancels);
                 LDLM_LOCK_GET(lock);
                 count++;
         }
@@ -1877,14 +1878,14 @@ EXPORT_SYMBOL(ldlm_cancel_resource_local);
  * buffer at the offset \a off.
  * Destroy \a cancels at the end.
  */
-int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list(struct list_head *cancels, int count,
                          struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
 {
         struct ldlm_lock *lock;
         int res = 0;
         ENTRY;
 
-        if (cfs_list_empty(cancels) || count == 0)
+       if (list_empty(cancels) || count == 0)
                 RETURN(0);
 
         /* XXX: requests (both batched and not) could be sent in parallel.
@@ -1893,8 +1894,8 @@ int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
          * It would also speed up the case when the server does not support
          * the feature. */
         while (count > 0) {
-                LASSERT(!cfs_list_empty(cancels));
-                lock = cfs_list_entry(cancels->next, struct ldlm_lock,
+               LASSERT(!list_empty(cancels));
+               lock = list_entry(cancels->next, struct ldlm_lock,
                                       l_bl_ast);
                 LASSERT(lock->l_conn_export);
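
Since list_entry() is container_of() and cancels->next is the first element, the kernel's named helper would be an equivalent, arguably clearer spelling (not used by this patch):

        lock = list_first_entry(cancels, struct ldlm_lock, l_bl_ast);
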
 
@@ -1970,7 +1971,7 @@ struct ldlm_cli_cancel_arg {
 };
 
 static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                      cfs_hlist_node_t *hnode, void *arg)
+                                      struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource           *res = cfs_hash_object(hs, hnode);
        struct ldlm_cli_cancel_arg     *lc = arg;
@@ -2019,7 +2020,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_unused);
 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                           void *closure)
 {
-        cfs_list_t *tmp, *next;
+       struct list_head *tmp, *next;
         struct ldlm_lock *lock;
         int rc = LDLM_ITER_CONTINUE;
 
@@ -2029,22 +2030,22 @@ int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                 RETURN(LDLM_ITER_CONTINUE);
 
         lock_res(res);
-        cfs_list_for_each_safe(tmp, next, &res->lr_granted) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_safe(tmp, next, &res->lr_granted) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
         }
 
-        cfs_list_for_each_safe(tmp, next, &res->lr_converting) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_safe(tmp, next, &res->lr_converting) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
         }
 
-        cfs_list_for_each_safe(tmp, next, &res->lr_waiting) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_safe(tmp, next, &res->lr_waiting) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
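
These three loops are mechanical conversions of cfs_list_for_each_safe(). A possible further cleanup, not done here, is the entry-based iterator, which folds away the tmp pointer and the explicit list_entry() call:

        struct ldlm_lock *lock, *next;

        list_for_each_entry_safe(lock, next, &res->lr_granted, l_res_link) {
                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }
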
@@ -2067,7 +2068,7 @@ static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
 }
 
 static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                cfs_hlist_node_t *hnode, void *arg)
+                               struct hlist_node *hnode, void *arg)
 
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2119,10 +2120,10 @@ EXPORT_SYMBOL(ldlm_resource_iterate);
 
 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
 {
-        cfs_list_t *list = closure;
+       struct list_head *list = closure;
 
         /* we use l_pending_chain here, because it's unused on clients. */
-        LASSERTF(cfs_list_empty(&lock->l_pending_chain),
+       LASSERTF(list_empty(&lock->l_pending_chain),
                  "lock %p next %p prev %p\n",
                 lock, &lock->l_pending_chain.next, &lock->l_pending_chain.prev);
         /* bug 9573: don't replay locks left after eviction, or
@@ -2130,7 +2131,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
           * on a lock so that it does not disappear under us (e.g. due to cancel)
          */
         if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
-                cfs_list_add(&lock->l_pending_chain, list);
+               list_add(&lock->l_pending_chain, list);
                 LDLM_LOCK_GET(lock);
         }
 
@@ -2230,7 +2231,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
         else if (lock->l_granted_mode)
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
-        else if (!cfs_list_empty(&lock->l_res_link))
+       else if (!list_empty(&lock->l_res_link))
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
         else
                 flags = LDLM_FL_REPLAY;
@@ -2323,8 +2324,8 @@ int ldlm_replay_locks(struct obd_import *imp)
 
        ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
 
-       cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
-               cfs_list_del_init(&lock->l_pending_chain);
+       list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+               list_del_init(&lock->l_pending_chain);
                if (rc) {
                        LDLM_LOCK_RELEASE(lock);
                        continue; /* or try to do the rest? */
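
The _safe iterator plus list_del_init() matters here because LDLM_LOCK_RELEASE() may free the lock, and a plain iterator would then read the next pointer from freed memory. For contrast, the broken shape this code avoids:

        /* BROKEN sketch, for illustration only: once LDLM_LOCK_RELEASE()
         * frees 'lock', the iterator dereferences freed memory to advance. */
        list_for_each_entry(lock, &list, l_pending_chain)
                LDLM_LOCK_RELEASE(lock);
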
index 85584ab..a275dbd 100644 (file)
@@ -56,15 +56,15 @@ int ldlm_srv_namespace_nr = 0;
 int ldlm_cli_namespace_nr = 0;
 
 struct mutex ldlm_srv_namespace_lock;
-CFS_LIST_HEAD(ldlm_srv_namespace_list);
+struct list_head ldlm_srv_namespace_list;
 
 struct mutex ldlm_cli_namespace_lock;
 /* Client namespaces that have active resources in them.
  * Once all resources go away, ldlm_poold moves such namespaces to the
  * inactive list */
-CFS_LIST_HEAD(ldlm_cli_active_namespace_list);
+struct list_head ldlm_cli_active_namespace_list;
 /* Client namespaces that don't have any locks in them */
-CFS_LIST_HEAD(ldlm_cli_inactive_namespace_list);
+struct list_head ldlm_cli_inactive_namespace_list;
 
 struct proc_dir_entry *ldlm_type_proc_dir = NULL;
 struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
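
Note that CFS_LIST_HEAD() both declared and statically initialized its list head, while the replacements above are bare declarations; these three heads are presumably initialized at runtime elsewhere in this patch (ldlm_init() would be the natural place, via INIT_LIST_HEAD()). Had static initialization been kept, the native one-liner would be:

        LIST_HEAD(ldlm_srv_namespace_list);     /* definition + self-linked init */
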
@@ -437,50 +437,50 @@ static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
        return hash & mask;
 }
 
-static void *ldlm_res_hop_key(cfs_hlist_node_t *hnode)
+static void *ldlm_res_hop_key(struct hlist_node *hnode)
 {
         struct ldlm_resource   *res;
 
-        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+       res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
         return &res->lr_name;
 }
 
-static int ldlm_res_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
 {
         struct ldlm_resource   *res;
 
-        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+       res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
         return ldlm_res_eq((const struct ldlm_res_id *)key,
                            (const struct ldlm_res_id *)&res->lr_name);
 }
 
-static void *ldlm_res_hop_object(cfs_hlist_node_t *hnode)
+static void *ldlm_res_hop_object(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+       return hlist_entry(hnode, struct ldlm_resource, lr_hash);
 }
 
-static void ldlm_res_hop_get_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct ldlm_resource *res;
 
-        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+       res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
         ldlm_resource_getref(res);
 }
 
-static void ldlm_res_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct ldlm_resource *res;
 
-        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+       res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
         /* cfs_hash_for_each_nolock is the only path that calls this */
         ldlm_resource_putref_locked(res);
 }
 
-static void ldlm_res_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct ldlm_resource *res;
 
-        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+       res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
         ldlm_resource_putref(res);
 }
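
All of these hop callbacks reduce to pointer arithmetic from the embedded lr_hash node; in the kernel, hlist_entry() is literally container_of():

        #define hlist_entry(ptr, type, member)  container_of(ptr, type, member)
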
 
@@ -621,8 +621,8 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
         ns->ns_appetite = apt;
         ns->ns_client   = client;
 
-       CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
-       CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
+       INIT_LIST_HEAD(&ns->ns_list_chain);
+       INIT_LIST_HEAD(&ns->ns_unused_list);
        spin_lock_init(&ns->ns_lock);
        atomic_set(&ns->ns_bref, 0);
        init_waitqueue_head(&ns->ns_waitq);
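
INIT_LIST_HEAD() is the runtime counterpart of the static LIST_HEAD() initializer: it points the node at itself so list_empty() is immediately true. In kernels of this era it is, roughly:

        static inline void INIT_LIST_HEAD(struct list_head *list)
        {
                list->next = list;
                list->prev = list;
        }
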
@@ -678,10 +678,10 @@ extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
  * certain assumptions as a result; notably, that we shouldn't cancel
  * locks with refs.
  */
-static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
+static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                             __u64 flags)
 {
-       cfs_list_t *tmp;
+       struct list_head *tmp;
        int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
        bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
 
@@ -690,18 +690,18 @@ static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
 
                /* First, look for a lock that has not been cleaned yet;
                 * all cleaned locks are marked by the CLEANED flag. */
-                lock_res(res);
-                cfs_list_for_each(tmp, q) {
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
-                                              l_res_link);
+               lock_res(res);
+               list_for_each(tmp, q) {
+                       lock = list_entry(tmp, struct ldlm_lock,
+                                         l_res_link);
                        if (ldlm_is_cleaned(lock)) {
-                                lock = NULL;
-                                continue;
-                        }
-                        LDLM_LOCK_GET(lock);
+                               lock = NULL;
+                               continue;
+                       }
+                       LDLM_LOCK_GET(lock);
                        ldlm_set_cleaned(lock);
-                        break;
-                }
+                       break;
+               }
 
                 if (lock == NULL) {
                         unlock_res(res);
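
The loop above is a "find the first unswept lock" scan: under the resource lock, pick the first lock without the CLEANED flag, pin it with a reference, mark it so it is never revisited, and break so the resource lock can be dropped before the cancel work. The same scan with the entry-based iterator, as a sketch only:

        struct ldlm_lock *lock = NULL, *tmp;

        list_for_each_entry(tmp, q, l_res_link) {
                if (ldlm_is_cleaned(tmp))
                        continue;
                LDLM_LOCK_GET(tmp);             /* pin before unlock_res() */
                ldlm_set_cleaned(tmp);          /* never pick it again */
                lock = tmp;
                break;
        }
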
@@ -758,7 +758,7 @@ static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
 }
 
 static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                               cfs_hlist_node_t *hnode, void *arg)
+                              struct hlist_node *hnode, void *arg)
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
        __u64 flags = *(__u64 *)arg;
@@ -771,7 +771,7 @@ static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                 cfs_hlist_node_t *hnode, void *arg)
+                                 struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
 
@@ -924,7 +924,7 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
       /* Namespace \a ns should not be on any list at this time; otherwise
        * the poold thread could end up using the freed \a ns. */
-       LASSERT(cfs_list_empty(&ns->ns_list_chain));
+       LASSERT(list_empty(&ns->ns_list_chain));
        OBD_FREE_PTR(ns);
        ldlm_put_ref();
        EXIT;
@@ -982,8 +982,8 @@ EXPORT_SYMBOL(ldlm_namespace_put);
 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
 {
        mutex_lock(ldlm_namespace_lock(client));
-       LASSERT(cfs_list_empty(&ns->ns_list_chain));
-       cfs_list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
+       LASSERT(list_empty(&ns->ns_list_chain));
+       list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
        ldlm_namespace_nr_inc(client);
        mutex_unlock(ldlm_namespace_lock(client));
 }
@@ -992,11 +992,11 @@ void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
 {
        mutex_lock(ldlm_namespace_lock(client));
-       LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+       LASSERT(!list_empty(&ns->ns_list_chain));
        /* Some asserts and possibly other parts of the code are still
         * using list_empty(&ns->ns_list_chain). This is why it is
         * important to use list_del_init() here. */
-       cfs_list_del_init(&ns->ns_list_chain);
+       list_del_init(&ns->ns_list_chain);
        ldlm_namespace_nr_dec(client);
        mutex_unlock(ldlm_namespace_lock(client));
 }
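
The comment above is the crux of this hunk: plain list_del() poisons the unlinked entry (LIST_POISON1/LIST_POISON2), after which list_empty() on it is meaningless, whereas list_del_init() re-links it to itself. Sketch:

        list_del_init(&ns->ns_list_chain);
        LASSERT(list_empty(&ns->ns_list_chain));        /* reliably true */

        /* with plain list_del() the entry would instead hold the poison
         * values, and list_empty() on it would be undefined. */
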
@@ -1005,26 +1005,26 @@ void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
                                       ldlm_side_t client)
 {
-       LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+       LASSERT(!list_empty(&ns->ns_list_chain));
        LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
-       cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
+       list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
 }
 
 /** Should be called with ldlm_namespace_lock(client) taken. */
 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
                                         ldlm_side_t client)
 {
-       LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+       LASSERT(!list_empty(&ns->ns_list_chain));
        LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
-       cfs_list_move_tail(&ns->ns_list_chain,
-                          ldlm_namespace_inactive_list(client));
+       list_move_tail(&ns->ns_list_chain,
+                      ldlm_namespace_inactive_list(client));
 }
 
 /** Should be called with ldlm_namespace_lock(client) taken. */
 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
 {
        LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
-       LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
+       LASSERT(!list_empty(ldlm_namespace_list(client)));
        return container_of(ldlm_namespace_list(client)->next,
                            struct ldlm_namespace, ns_list_chain);
 }
@@ -1039,9 +1039,9 @@ static struct ldlm_resource *ldlm_resource_new(void)
        if (res == NULL)
                return NULL;
 
-       CFS_INIT_LIST_HEAD(&res->lr_granted);
-       CFS_INIT_LIST_HEAD(&res->lr_converting);
-       CFS_INIT_LIST_HEAD(&res->lr_waiting);
+       INIT_LIST_HEAD(&res->lr_granted);
+       INIT_LIST_HEAD(&res->lr_converting);
+       INIT_LIST_HEAD(&res->lr_waiting);
 
        /* Initialize interval trees for each lock mode. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
@@ -1159,17 +1159,17 @@ static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
 {
         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
 
-        if (!cfs_list_empty(&res->lr_granted)) {
+       if (!list_empty(&res->lr_granted)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
 
-        if (!cfs_list_empty(&res->lr_converting)) {
+       if (!list_empty(&res->lr_converting)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
 
-        if (!cfs_list_empty(&res->lr_waiting)) {
+       if (!list_empty(&res->lr_waiting)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
@@ -1238,7 +1238,7 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
 /**
  * Add a lock into a given resource into specified lock list.
  */
-void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                             struct ldlm_lock *lock)
 {
        check_res_locked(res);
@@ -1250,9 +1250,9 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
                return;
        }
 
-       LASSERT(cfs_list_empty(&lock->l_res_link));
+       LASSERT(list_empty(&lock->l_res_link));
 
-       cfs_list_add_tail(&lock->l_res_link, head);
+       list_add_tail(&lock->l_res_link, head);
 }
 
 /**
@@ -1275,9 +1275,9 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
                goto out;
        }
 
-        LASSERT(cfs_list_empty(&new->l_res_link));
+       LASSERT(list_empty(&new->l_res_link));
 
-        cfs_list_add(&new->l_res_link, &original->l_res_link);
+       list_add(&new->l_res_link, &original->l_res_link);
  out:;
 }
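
Worth noting for the skiplist logic: list_add(new, pos) splices 'new' in immediately after 'pos', and 'pos' here is another lock's l_res_link rather than the list head, so this is a positional insert:

        /* before:  original -> X
         * after:   original -> new -> X */
        list_add(&new->l_res_link, &original->l_res_link);
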
 
@@ -1290,7 +1290,7 @@ void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
                 ldlm_unlink_lock_skiplist(lock);
         else if (type == LDLM_EXTENT)
                 ldlm_extent_unlink_lock(lock);
-        cfs_list_del_init(&lock->l_res_link);
+       list_del_init(&lock->l_res_link);
 }
 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
 
@@ -1306,16 +1306,16 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
  */
 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
 {
-        cfs_list_t *tmp;
+       struct list_head *tmp;
 
         if (!((libcfs_debug | D_ERROR) & level))
                 return;
 
        mutex_lock(ldlm_namespace_lock(client));
 
-        cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
+       list_for_each(tmp, ldlm_namespace_list(client)) {
                 struct ldlm_namespace *ns;
-                ns = cfs_list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+               ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
                 ldlm_namespace_dump(level, ns);
         }
 
@@ -1324,7 +1324,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
 
 static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                              cfs_hlist_node_t *hnode, void *arg)
+                             struct hlist_node *hnode, void *arg)
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
         int    level = (int)(unsigned long)arg;
@@ -1377,9 +1377,9 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
        CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
               PLDLMRES(res), res, atomic_read(&res->lr_refcount));
 
-       if (!cfs_list_empty(&res->lr_granted)) {
+       if (!list_empty(&res->lr_granted)) {
                CDEBUG(level, "Granted locks (in reverse order):\n");
-               cfs_list_for_each_entry_reverse(lock, &res->lr_granted,
+               list_for_each_entry_reverse(lock, &res->lr_granted,
                                                l_res_link) {
                         LDLM_DEBUG_LIMIT(level, lock, "###");
                         if (!(level & D_CANTMASK) &&
@@ -1390,14 +1390,14 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
                         }
                 }
         }
-        if (!cfs_list_empty(&res->lr_converting)) {
+       if (!list_empty(&res->lr_converting)) {
                 CDEBUG(level, "Converting locks:\n");
-                cfs_list_for_each_entry(lock, &res->lr_converting, l_res_link)
+               list_for_each_entry(lock, &res->lr_converting, l_res_link)
                         LDLM_DEBUG_LIMIT(level, lock, "###");
         }
-        if (!cfs_list_empty(&res->lr_waiting)) {
+       if (!list_empty(&res->lr_waiting)) {
                 CDEBUG(level, "Waiting locks:\n");
-                cfs_list_for_each_entry(lock, &res->lr_waiting, l_res_link)
+               list_for_each_entry(lock, &res->lr_waiting, l_res_link)
                         LDLM_DEBUG_LIMIT(level, lock, "###");
         }
 }