Whamcloud - gitweb
LU-3963 ldlm: convert to linux list api
[fs/lustre-release.git] / lustre / ldlm / ldlm_request.c
index 19a3595..079edc8 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -831,7 +831,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
 EXPORT_SYMBOL(ldlm_prep_elc_req);
 
 int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
-                          cfs_list_t *cancels, int count)
+                         struct list_head *cancels, int count)
 {
         return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                  LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
@@ -1157,7 +1157,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
  * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
  */
 static void ldlm_cancel_pack(struct ptlrpc_request *req,
-                             cfs_list_t *head, int count)
+                            struct list_head *head, int count)
 {
         struct ldlm_request *dlm;
         struct ldlm_lock *lock;
@@ -1177,7 +1177,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
         /* XXX: it would be better to pack lock handles grouped by resource.
          * so that the server cancel would call filter_lvbo_update() less
          * frequently. */
-        cfs_list_for_each_entry(lock, head, l_bl_ast) {
+       list_for_each_entry(lock, head, l_bl_ast) {
                 if (!count--)
                         break;
                 LASSERT(lock->l_conn_export);
@@ -1193,7 +1193,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
 /**
  * Prepare and send a batched cancel RPC. It will include \a count lock
  * handles of locks given in \a cancels list. */
-int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
                         int count, ldlm_cancel_flags_t flags)
 {
         struct ptlrpc_request *req = NULL;
@@ -1366,8 +1366,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
        /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
         * RPC which goes to canceld portal, so we can cancel other LRU locks
         * here and send them all as one LDLM_CANCEL RPC. */
-        LASSERT(cfs_list_empty(&lock->l_bl_ast));
-        cfs_list_add(&lock->l_bl_ast, &cancels);
+       LASSERT(list_empty(&lock->l_bl_ast));
+       list_add(&lock->l_bl_ast, &cancels);
 
         exp = lock->l_conn_export;
         if (exp_connect_cancelset(exp)) {
@@ -1391,7 +1391,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel);
  * Locally cancel up to \a count locks in list \a cancels.
  * Return the number of cancelled locks.
  */
-int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
                               ldlm_cancel_flags_t flags)
 {
        struct list_head head = LIST_HEAD_INIT(head);
@@ -1400,7 +1400,7 @@ int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
        __u64 rc;
 
         left = count;
-        cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+       list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                 if (left-- == 0)
                         break;
 
@@ -1416,14 +1416,14 @@ int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
                 * the one being generated now. */
                if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                        LDLM_DEBUG(lock, "Cancel lock separately");
-                       cfs_list_del_init(&lock->l_bl_ast);
-                       cfs_list_add(&lock->l_bl_ast, &head);
+                       list_del_init(&lock->l_bl_ast);
+                       list_add(&lock->l_bl_ast, &head);
                        bl_ast++;
                         continue;
                 }
                 if (rc == LDLM_FL_LOCAL_ONLY) {
                         /* CANCEL RPC should not be sent to server. */
-                        cfs_list_del_init(&lock->l_bl_ast);
+                       list_del_init(&lock->l_bl_ast);
                         LDLM_LOCK_RELEASE(lock);
                         count--;
                 }
@@ -1639,8 +1639,9 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
  *                               sending any RPCs or waiting for any
  *                               outstanding RPC to complete.
  */
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
-                                 int count, int max, int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
+                                struct list_head *cancels, int count, int max,
+                                int flags)
 {
        ldlm_cancel_lru_policy_t pf;
        struct ldlm_lock *lock, *next;
@@ -1657,7 +1658,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
         pf = ldlm_cancel_lru_policy(ns, flags);
         LASSERT(pf != NULL);
 
-        while (!cfs_list_empty(&ns->ns_unused_list)) {
+       while (!list_empty(&ns->ns_unused_list)) {
                 ldlm_policy_res_t result;
 
                 /* all unused locks */
@@ -1668,7 +1669,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 if (max && added >= max)
                         break;
 
-                cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+               list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
                                             l_lru) {
                         /* No locks which got blocking requests. */
                        LASSERT(!ldlm_is_bl_ast(lock));
@@ -1758,8 +1759,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 * and can't use l_pending_chain as it is used both on
                 * server and client nevertheless bug 5666 says it is
                 * used only on server */
-               LASSERT(cfs_list_empty(&lock->l_bl_ast));
-               cfs_list_add(&lock->l_bl_ast, cancels);
+               LASSERT(list_empty(&lock->l_bl_ast));
+               list_add(&lock->l_bl_ast, cancels);
                unlock_res_and_lock(lock);
                lu_ref_del(&lock->l_reference, __FUNCTION__, current);
                spin_lock(&ns->ns_lock);
@@ -1770,7 +1771,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
        RETURN(added);
 }
 
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                           int count, int max, ldlm_cancel_flags_t cancel_flags,
                           int flags)
 {
@@ -1816,7 +1817,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
  * list.
  */
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
-                              cfs_list_t *cancels,
+                              struct list_head *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, __u64 lock_flags,
                               ldlm_cancel_flags_t cancel_flags, void *opaque)
@@ -1826,7 +1827,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
         ENTRY;
 
         lock_res(res);
-        cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+       list_for_each_entry(lock, &res->lr_granted, l_res_link) {
                 if (opaque != NULL && lock->l_ast_data != opaque) {
                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                    lock->l_ast_data, opaque);
@@ -1856,8 +1857,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
                                 lock_flags;
 
-                LASSERT(cfs_list_empty(&lock->l_bl_ast));
-                cfs_list_add(&lock->l_bl_ast, cancels);
+               LASSERT(list_empty(&lock->l_bl_ast));
+               list_add(&lock->l_bl_ast, cancels);
                 LDLM_LOCK_GET(lock);
                 count++;
         }
@@ -1877,14 +1878,14 @@ EXPORT_SYMBOL(ldlm_cancel_resource_local);
  * buffer at the offset \a off.
  * Destroy \a cancels at the end.
  */
-int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list(struct list_head *cancels, int count,
                          struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
 {
         struct ldlm_lock *lock;
         int res = 0;
         ENTRY;
 
-        if (cfs_list_empty(cancels) || count == 0)
+       if (list_empty(cancels) || count == 0)
                 RETURN(0);
 
         /* XXX: requests (both batched and not) could be sent in parallel.
@@ -1893,8 +1894,8 @@ int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
          * It would also speed up the case when the server does not support
          * the feature. */
         while (count > 0) {
-                LASSERT(!cfs_list_empty(cancels));
-                lock = cfs_list_entry(cancels->next, struct ldlm_lock,
+               LASSERT(!list_empty(cancels));
+               lock = list_entry(cancels->next, struct ldlm_lock,
                                       l_bl_ast);
                 LASSERT(lock->l_conn_export);
 
@@ -1970,7 +1971,7 @@ struct ldlm_cli_cancel_arg {
 };
 
 static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                      cfs_hlist_node_t *hnode, void *arg)
+                                      struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource           *res = cfs_hash_object(hs, hnode);
        struct ldlm_cli_cancel_arg     *lc = arg;
@@ -2019,7 +2020,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_unused);
 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                           void *closure)
 {
-        cfs_list_t *tmp, *next;
+       struct list_head *tmp, *next;
         struct ldlm_lock *lock;
         int rc = LDLM_ITER_CONTINUE;
 
@@ -2029,22 +2030,22 @@ int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                 RETURN(LDLM_ITER_CONTINUE);
 
         lock_res(res);
-        cfs_list_for_each_safe(tmp, next, &res->lr_granted) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_safe(tmp, next, &res->lr_granted) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
         }
 
-        cfs_list_for_each_safe(tmp, next, &res->lr_converting) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_safe(tmp, next, &res->lr_converting) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
         }
 
-        cfs_list_for_each_safe(tmp, next, &res->lr_waiting) {
-                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_safe(tmp, next, &res->lr_waiting) {
+               lock = list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
@@ -2067,7 +2068,7 @@ static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
 }
 
 static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                cfs_hlist_node_t *hnode, void *arg)
+                               struct hlist_node *hnode, void *arg)
 
 {
         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2119,10 +2120,10 @@ EXPORT_SYMBOL(ldlm_resource_iterate);
 
 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
 {
-        cfs_list_t *list = closure;
+       struct list_head *list = closure;
 
         /* we use l_pending_chain here, because it's unused on clients. */
-        LASSERTF(cfs_list_empty(&lock->l_pending_chain),
+       LASSERTF(list_empty(&lock->l_pending_chain),
                  "lock %p next %p prev %p\n",
                  lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
         /* bug 9573: don't replay locks left after eviction, or
@@ -2130,7 +2131,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
          * on a lock so that it does not disapear under us (e.g. due to cancel)
          */
         if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
-                cfs_list_add(&lock->l_pending_chain, list);
+               list_add(&lock->l_pending_chain, list);
                 LDLM_LOCK_GET(lock);
         }
 
@@ -2230,7 +2231,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
         else if (lock->l_granted_mode)
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
-        else if (!cfs_list_empty(&lock->l_res_link))
+       else if (!list_empty(&lock->l_res_link))
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
         else
                 flags = LDLM_FL_REPLAY;
@@ -2323,8 +2324,8 @@ int ldlm_replay_locks(struct obd_import *imp)
 
        ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
 
-       cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
-               cfs_list_del_init(&lock->l_pending_chain);
+       list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+               list_del_init(&lock->l_pending_chain);
                if (rc) {
                        LDLM_LOCK_RELEASE(lock);
                        continue; /* or try to do the rest? */