LU-2901 mdt: duplicate link names in directory
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index abd1e8c..49d0034 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -219,7 +219,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
                            "final lock_put on destroyed lock, freeing it.");
 
                 res = lock->l_resource;
-                LASSERT(lock->l_destroyed);
+               LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
                 LASSERT(cfs_list_empty(&lock->l_res_link));
                 LASSERT(cfs_list_empty(&lock->l_pending_chain));
 
@@ -270,14 +270,14 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
  */
 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
 {
-        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-        int rc;
+       struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+       int rc;
 
-        ENTRY;
-        if (lock->l_ns_srv) {
-                LASSERT(cfs_list_empty(&lock->l_lru));
-                RETURN(0);
-        }
+       ENTRY;
+       if (lock->l_flags & LDLM_FL_NS_SRV) {
+               LASSERT(cfs_list_empty(&lock->l_lru));
+               RETURN(0);
+       }
 
        spin_lock(&ns->ns_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
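
These hunks (and the ones that follow) fold the old single-bit fields l_destroyed,
l_ns_srv, l_failed and l_waited into bits of the 64-bit l_flags word, so lock state
is tested and set with mask operations. A minimal stand-alone sketch of that pattern,
using hypothetical bit values and struct names rather than the real ones from the
Lustre flag headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bit values for illustration only; the real assignments
     * live in lustre_dlm_flags.h. */
    #define EX_FL_DESTROYED 0x0000000000010000ULL
    #define EX_FL_NS_SRV    0x0000000000020000ULL

    struct ex_lock {
            uint64_t l_flags;   /* replaces l_destroyed:1, l_ns_srv:1, ... */
    };

    int main(void)
    {
            struct ex_lock lk = { 0 };

            lk.l_flags |= EX_FL_NS_SRV;            /* was: lk.l_ns_srv = 1;    */
            if (lk.l_flags & EX_FL_DESTROYED)      /* was: if (lk.l_destroyed) */
                    printf("already destroyed\n");
            else
                    lk.l_flags |= EX_FL_DESTROYED; /* was: lk.l_destroyed = 1; */

            printf("flags: %#llx\n", (unsigned long long)lk.l_flags);
            return 0;
    }
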
@@ -322,14 +322,14 @@ void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
  */
 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
 {
-        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+       struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
-        ENTRY;
-        if (lock->l_ns_srv) {
-                LASSERT(cfs_list_empty(&lock->l_lru));
-                EXIT;
-                return;
-        }
+       ENTRY;
+       if (lock->l_flags & LDLM_FL_NS_SRV) {
+               LASSERT(cfs_list_empty(&lock->l_lru));
+               EXIT;
+               return;
+       }
 
        spin_lock(&ns->ns_lock);
        if (!cfs_list_empty(&lock->l_lru)) {
@@ -373,12 +373,12 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
                 LBUG();
         }
 
-        if (lock->l_destroyed) {
-                LASSERT(cfs_list_empty(&lock->l_lru));
-                EXIT;
-                return 0;
-        }
-        lock->l_destroyed = 1;
+       if (lock->l_flags & LDLM_FL_DESTROYED) {
+               LASSERT(cfs_list_empty(&lock->l_lru));
+               EXIT;
+               return 0;
+       }
+       lock->l_flags |= LDLM_FL_DESTROYED;
 
        if (lock->l_export && lock->l_export->exp_lock_hash) {
                /* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -616,7 +616,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
 
         /* It's unlikely but possible that someone marked the lock as
          * destroyed after we did handle2object on it */
-        if (flags == 0 && !lock->l_destroyed) {
+       if (flags == 0 && (lock->l_flags & LDLM_FL_DESTROYED) == 0) {
                 lu_ref_add(&lock->l_reference, "handle", cfs_current());
                 RETURN(lock);
         }
@@ -626,7 +626,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
         LASSERT(lock->l_resource != NULL);
 
         lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
-        if (unlikely(lock->l_destroyed)) {
+       if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
                 unlock_res_and_lock(lock);
                 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                 LDLM_LOCK_PUT(lock);
@@ -715,7 +715,7 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                 lock->l_flags |= LDLM_FL_AST_SENT;
                 /* If the enqueuing client said so, tell the AST recipient to
                  * discard dirty data, rather than writing back. */
-                if (new->l_flags & LDLM_AST_DISCARD_DATA)
+               if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
                         lock->l_flags |= LDLM_FL_DISCARD_DATA;
                 LASSERT(cfs_list_empty(&lock->l_bl_ast));
                 cfs_list_add(&lock->l_bl_ast, work_list);
@@ -881,20 +881,6 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 
         ldlm_lock_decref_internal_nolock(lock, mode);
 
-       /* release lvb data for layout lock */
-       if (ns_is_client(ns) && !lock->l_readers && !lock->l_writers &&
-           ldlm_has_layout(lock) && lock->l_flags & LDLM_FL_LVB_READY) {
-               /* this is the last user of a layout lock and stripe has
-                * been set up, lvb is no longer used.
-                * This may be a large amount of memory, so we should free it
-                * when possible. */
-               if (lock->l_lvb_data != NULL) {
-                       OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);
-                       lock->l_lvb_data = NULL;
-                       lock->l_lvb_len = 0;
-               }
-       }
-
         if (lock->l_flags & LDLM_FL_LOCAL &&
             !lock->l_readers && !lock->l_writers) {
                 /* If this is a local lock on a server namespace and this was
@@ -907,7 +893,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
             (lock->l_flags & LDLM_FL_CBPENDING)) {
                 /* If we received a blocked AST and this was the last reference,
                  * run the callback. */
-                if (lock->l_ns_srv && lock->l_export)
+               if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
                         CERROR("FL_CBPENDING set on non-local lock--just a "
                                "warning\n");
 
@@ -1103,7 +1089,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
         ldlm_resource_dump(D_INFO, res);
         LDLM_DEBUG(lock, "About to add lock:");
 
-        if (lock->l_destroyed) {
+       if (lock->l_flags & LDLM_FL_DESTROYED) {
                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                 return;
         }
@@ -1237,9 +1223,7 @@ static struct ldlm_lock *search_queue(cfs_list_t *queue,
                       policy->l_inodebits.bits))
                         continue;
 
-                if (!unref &&
-                    (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
-                     lock->l_failed))
+               if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
                         continue;
 
                 if ((flags & LDLM_FL_LOCAL_ONLY) &&
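
Here the three separate "lock is gone" tests (l_destroyed, LDLM_FL_FAILED, the
l_failed bitfield) become one test against LDLM_FL_GONE_MASK. A hedged sketch of
what such a mask amounts to, assuming it is simply the OR of the gone-type flags;
the exact composition and bit values are defined in lustre_dlm_flags.h and may
differ:

    #include <stdint.h>

    /* Illustration only: assumed composition of a "gone" mask with made-up
     * bit values; not the real Lustre definitions. */
    #define EX_FL_DESTROYED      0x0000000000010000ULL
    #define EX_FL_FAILED         0x0000000000020000ULL
    #define EX_FL_FAIL_NOTIFIED  0x0000000000040000ULL
    #define EX_FL_GONE_MASK      (EX_FL_DESTROYED | EX_FL_FAILED | \
                                  EX_FL_FAIL_NOTIFIED)

    /* was: lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED || lock->l_failed */
    static inline int ex_lock_is_gone(uint64_t l_flags)
    {
            return (l_flags & EX_FL_GONE_MASK) != 0;
    }
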
@@ -1261,10 +1245,10 @@ static struct ldlm_lock *search_queue(cfs_list_t *queue,
 
 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
 {
-        if (!lock->l_failed) {
-                lock->l_failed = 1;
-                cfs_waitq_broadcast(&lock->l_waitq);
-        }
+       if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
+               lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
+               cfs_waitq_broadcast(&lock->l_waitq);
+       }
 }
 EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
 
@@ -1386,6 +1370,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                 ldlm_lock2handle(lock, lockh);
                 if ((flags & LDLM_FL_LVB_READY) &&
                     (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+                       __u64 wait_flags = LDLM_FL_LVB_READY |
+                               LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
                         struct l_wait_info lwi;
                         if (lock->l_completion_ast) {
                                 int err = lock->l_completion_ast(lock,
@@ -1405,11 +1391,10 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                                NULL, LWI_ON_SIGNAL_NOOP, NULL);
 
-                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
-                        l_wait_event(lock->l_waitq,
-                                     lock->l_flags & LDLM_FL_LVB_READY ||
-                                    lock->l_destroyed || lock->l_failed,
-                                     &lwi);
+                       /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
+                       l_wait_event(lock->l_waitq,
+                                    lock->l_flags & wait_flags,
+                                    &lwi);
                         if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
                                 if (flags & LDLM_FL_TEST_LOCK)
                                         LDLM_LOCK_RELEASE(lock);
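
The precomputed wait_flags mask lets l_wait_event() wake on a single flag test
instead of the old three-way OR: (f & (A|B|C)) != 0 is equivalent to
(f & A) || (f & B) || (f & C). A small self-contained check of that equivalence,
using hypothetical bit values:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical bit values for illustration only. */
    #define EX_FL_LVB_READY      0x0000000000010000ULL
    #define EX_FL_DESTROYED      0x0000000000020000ULL
    #define EX_FL_FAIL_NOTIFIED  0x0000000000040000ULL

    int main(void)
    {
            const uint64_t wait_flags = EX_FL_LVB_READY | EX_FL_DESTROYED |
                                        EX_FL_FAIL_NOTIFIED;
            uint64_t f;

            /* Exhaustively confirm the combined-mask test matches the old
             * per-flag OR for every combination of the three bits. */
            for (f = 0; f < 8; f++) {
                    uint64_t l_flags = ((f & 1) ? EX_FL_LVB_READY : 0) |
                                       ((f & 2) ? EX_FL_DESTROYED : 0) |
                                       ((f & 4) ? EX_FL_FAIL_NOTIFIED : 0);
                    int old = (l_flags & EX_FL_LVB_READY) ||
                              (l_flags & EX_FL_DESTROYED) ||
                              (l_flags & EX_FL_FAIL_NOTIFIED);
                    int new = (l_flags & wait_flags) != 0;

                    assert(old == new);
            }
            return 0;
    }
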
@@ -1465,8 +1450,7 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
         lock = ldlm_handle2lock(lockh);
         if (lock != NULL) {
                 lock_res_and_lock(lock);
-                if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
-                    lock->l_failed)
+               if (lock->l_flags & LDLM_FL_GONE_MASK)
                         GOTO(out, mode);
 
                 if (lock->l_flags & LDLM_FL_CBPENDING &&
@@ -1617,12 +1601,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
         lock->l_req_mode = mode;
         lock->l_ast_data = data;
         lock->l_pid = cfs_curproc_pid();
-        lock->l_ns_srv = !!ns_is_server(ns);
+       if (ns_is_server(ns))
+               lock->l_flags |= LDLM_FL_NS_SRV;
         if (cbs) {
                 lock->l_blocking_ast = cbs->lcs_blocking;
                 lock->l_completion_ast = cbs->lcs_completion;
                 lock->l_glimpse_ast = cbs->lcs_glimpse;
-                lock->l_weigh_ast = cbs->lcs_weigh;
         }
 
         lock->l_tree_node = NULL;
@@ -1728,21 +1712,21 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                 node = NULL;
         }
 
-        /* Some flags from the enqueue want to make it into the AST, via the
-         * lock's l_flags. */
-        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
-
-        /* This distinction between local lock trees is very important; a client
-         * namespace only has information about locks taken by that client, and
-         * thus doesn't have enough information to decide for itself if it can
-         * be granted (below).  In this case, we do exactly what the server
-         * tells us to do, as dictated by the 'flags'.
-         *
-         * We do exactly the same thing during recovery, when the server is
-         * more or less trusting the clients not to lie.
-         *
-         * FIXME (bug 268): Detect obvious lies by checking compatibility in
-         * granted/converting queues. */
+       /* Some flags from the enqueue want to make it into the AST, via the
+        * lock's l_flags. */
+       lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+
+       /* This distinction between local lock trees is very important; a client
+        * namespace only has information about locks taken by that client, and
+        * thus doesn't have enough information to decide for itself if it can
+        * be granted (below).  In this case, we do exactly what the server
+        * tells us to do, as dictated by the 'flags'.
+        *
+        * We do exactly the same thing during recovery, when the server is
+        * more or less trusting the clients not to lie.
+        *
+        * FIXME (bug 268): Detect obvious lies by checking compatibility in
+        * granted/converting queues. */
         if (local) {
                 if (*flags & LDLM_FL_BLOCK_CONV)
                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
@@ -2164,15 +2148,15 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
                 LBUG();
         }
 
-       if (lock->l_waited)
+       if (lock->l_flags & LDLM_FL_WAITED)
                ldlm_del_waiting_lock(lock);
 
         /* Releases cancel callback. */
         ldlm_cancel_callback(lock);
 
-        /* Yes, second time, just in case it was added again while we were
-           running with no res lock in ldlm_cancel_callback */
-       if (lock->l_waited)
+       /* Yes, second time, just in case it was added again while we were
+        * running with no res lock in ldlm_cancel_callback */
+       if (lock->l_flags & LDLM_FL_WAITED)
                ldlm_del_waiting_lock(lock);
 
         ldlm_resource_unlink_lock(lock);
@@ -2330,7 +2314,8 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
         /* I can't check the type of lock here because the bitlock of lock
          * is not held here, so do the allocation blindly. -jay */
         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
-        if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
+       if (node == NULL)
+               /* Actually, this causes LUSTRE_EDEADLK to be returned */
                 RETURN(NULL);
 
         LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
@@ -2490,96 +2475,95 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                 return;
         }
 
-        switch (resource->lr_type) {
-        case LDLM_EXTENT:
-                libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
-                       "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
-                       "] (req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote:"
-                       " "LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
-                       ldlm_lock_to_ns_name(lock), lock,
-                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       resource->lr_name.name[0],
-                       resource->lr_name.name[1],
-                       cfs_atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_policy_data.l_extent.start,
-                       lock->l_policy_data.l_extent.end,
-                       lock->l_req_extent.start, lock->l_req_extent.end,
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
-                break;
+       switch (resource->lr_type) {
+       case LDLM_EXTENT:
+               libcfs_debug_vmsg2(msgdata, fmt, args,
+                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+                       "res: "DLDLMRES" rrc: %d type: %s ["LPU64"->"LPU64"] "
+                       "(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
+                       LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+                       ldlm_lock_to_ns_name(lock), lock,
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+                       lock->l_readers, lock->l_writers,
+                       ldlm_lockname[lock->l_granted_mode],
+                       ldlm_lockname[lock->l_req_mode],
+                       PLDLMRES(resource),
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
+                       lock->l_policy_data.l_extent.start,
+                       lock->l_policy_data.l_extent.end,
+                       lock->l_req_extent.start, lock->l_req_extent.end,
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout,
+                       lock->l_lvb_type);
+               break;
 
-        case LDLM_FLOCK:
-                libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
-                       "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
-                       "["LPU64"->"LPU64"] flags: "LPX64" nid: %s remote: "LPX64
-                       " expref: %d pid: %u timeout: %lu\n",
-                       ldlm_lock_to_ns_name(lock), lock,
-                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       resource->lr_name.name[0],
-                       resource->lr_name.name[1],
-                       cfs_atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_policy_data.l_flock.pid,
-                       lock->l_policy_data.l_flock.start,
-                       lock->l_policy_data.l_flock.end,
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout);
-                break;
+       case LDLM_FLOCK:
+               libcfs_debug_vmsg2(msgdata, fmt, args,
+                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+                       "res: "DLDLMRES" rrc: %d type: %s pid: %d "
+                       "["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
+                       "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
+                       ldlm_lock_to_ns_name(lock), lock,
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+                       lock->l_readers, lock->l_writers,
+                       ldlm_lockname[lock->l_granted_mode],
+                       ldlm_lockname[lock->l_req_mode],
+                       PLDLMRES(resource),
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
+                       lock->l_policy_data.l_flock.pid,
+                       lock->l_policy_data.l_flock.start,
+                       lock->l_policy_data.l_flock.end,
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout);
+               break;
 
-        case LDLM_IBITS:
-                libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
-                       "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
-                       "flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
-                       "pid: %u timeout: %lu lvb_type: %d\n",
-                       ldlm_lock_to_ns_name(lock),
-                       lock, lock->l_handle.h_cookie,
-                       cfs_atomic_read (&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       resource->lr_name.name[0],
-                       resource->lr_name.name[1],
-                       lock->l_policy_data.l_inodebits.bits,
-                       cfs_atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
-                break;
+       case LDLM_IBITS:
+               libcfs_debug_vmsg2(msgdata, fmt, args,
+                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+                       "res: "DLDLMRES" bits "LPX64" rrc: %d type: %s "
+                       "flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
+                       "pid: %u timeout: %lu lvb_type: %d\n",
+                       ldlm_lock_to_ns_name(lock),
+                       lock, lock->l_handle.h_cookie,
+                       cfs_atomic_read(&lock->l_refc),
+                       lock->l_readers, lock->l_writers,
+                       ldlm_lockname[lock->l_granted_mode],
+                       ldlm_lockname[lock->l_req_mode],
+                       PLDLMRES(resource),
+                       lock->l_policy_data.l_inodebits.bits,
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout,
+                       lock->l_lvb_type);
+               break;
 
-        default:
-                libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
-                       "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
-                       "nid: %s remote: "LPX64" expref: %d pid: %u timeout: %lu"
-                       "lvb_type: %d\n",
-                       ldlm_lock_to_ns_name(lock),
-                       lock, lock->l_handle.h_cookie,
-                       cfs_atomic_read (&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       resource->lr_name.name[0],
-                       resource->lr_name.name[1],
-                       cfs_atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
-                break;
-        }
-        va_end(args);
+       default:
+               libcfs_debug_vmsg2(msgdata, fmt, args,
+                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+                       "res: "DLDLMRES" rrc: %d type: %s flags: "LPX64" "
+                       "nid: %s remote: "LPX64" expref: %d pid: %u "
+                       "timeout: %lu lvb_type: %d\n",
+                       ldlm_lock_to_ns_name(lock),
+                       lock, lock->l_handle.h_cookie,
+                       cfs_atomic_read(&lock->l_refc),
+                       lock->l_readers, lock->l_writers,
+                       ldlm_lockname[lock->l_granted_mode],
+                       ldlm_lockname[lock->l_req_mode],
+                       PLDLMRES(resource),
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout,
+                       lock->l_lvb_type);
+               break;
+       }
+       va_end(args);
 }
 EXPORT_SYMBOL(_ldlm_lock_debug);
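
The debug-message hunks above swap the hand-passed pair of resource name words for
the DLDLMRES format macro and its matching PLDLMRES() argument macro, so every case
prints the full resource name identically. A rough stand-alone sketch of that
format/argument macro pairing, with simplified stand-in types and formats rather
than the real lustre_dlm.h definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for illustration; not the real Lustre definitions. */
    struct ex_res_name { uint64_t name[4]; };
    struct ex_resource { struct ex_res_name lr_name; };

    /* A format macro and an argument macro that are always used together,
     * keeping every call site's resource output consistent. */
    #define EX_DLDLMRES     "[%#llx:%#llx:%#llx].%#llx"
    #define EX_PLDLMRES(r)  (unsigned long long)(r)->lr_name.name[0], \
                            (unsigned long long)(r)->lr_name.name[1], \
                            (unsigned long long)(r)->lr_name.name[2], \
                            (unsigned long long)(r)->lr_name.name[3]

    int main(void)
    {
            struct ex_resource res = { { { 0x200000007ULL, 0x1ULL, 0, 0 } } };

            /* was: "res: "LPU64"/"LPU64 with name[0] and name[1] passed by hand */
            printf("res: " EX_DLDLMRES "\n", EX_PLDLMRES(&res));
            return 0;
    }
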