Whamcloud - gitweb
status: on ./mcreate I believe the client lock on the file and the mds
author: braam <braam>
Mon, 24 Jun 2002 17:22:36 +0000 (17:22 +0000)
committer: braam <braam>
Mon, 24 Jun 2002 17:22:36 +0000 (17:22 +0000)
lock on the directory are both correctly enqueued.

- updated documentation regarding lock / resource refcounting
- removed unnecessary resource_getref before policy (thanks phil)
- local locks get an addref, server locks don't
- variety of small fixes
- print lock/type names
- add one reference on lock for creation, put it away once for destroy
  [otherwise 0 ref locks can "live" and that causes the resources grief]
- small fix to change_resource

lustre/include/linux/lustre_dlm.h
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c

index db6e04a..67bd6eb 100644 (file)
@@ -181,16 +181,21 @@ static inline struct ldlm_extent *ldlm_res2extent(struct ldlm_resource *res)
 
 extern struct obd_ops ldlm_obd_ops;
 
+
+extern char *ldlm_lockname[];
+extern char *ldlm_typename[];
+
 #define LDLM_DEBUG(lock, format, a...)                          \
 do {                                                            \
         CDEBUG(D_DLMTRACE, "### " format                        \
-               " (%s: lock %p mode %d/%d on res %Lu (rc %d) "   \
-               " type %d remote %Lx)\n" , ## a,                 \
+               " (%s: lock %p mode %s/%s on res %Lu (rc %d) "   \
+               " type %s remote %Lx)\n" , ## a,                 \
                lock->l_resource->lr_namespace->ns_name, lock,   \
-               lock->l_granted_mode, lock->l_req_mode,          \
+               ldlm_lockname[lock->l_granted_mode],             \
+               ldlm_lockname[lock->l_req_mode],                 \
                lock->l_resource->lr_name[0],                    \
                atomic_read(&lock->l_resource->lr_refcount),     \
-               lock->l_resource->lr_type,                       \
+               ldlm_typename[lock->l_resource->lr_type],        \
                lock->l_remote_handle.addr);                     \
 } while (0)
 
index 881a4d1..4a6e264 100644 (file)
 #include <linux/lustre_dlm.h>
 #include <linux/lustre_mds.h>
 
+/* lock types */
+char *ldlm_lockname[] = {
+        [LCK_EX] "EX", 
+        [LCK_PW] "PW",
+        [LCK_PR] "PR",
+        [LCK_CW] "CW",
+        [LCK_CR] "CR",
+        [LCK_NL] "NL"
+};
+
+char *ldlm_typename[] = {
+        [LDLM_PLAIN]     "PLN",
+        [LDLM_EXTENT]    "EXT",
+        [LDLM_MDSINTENT] "INT"
+};        
+
 extern kmem_cache_t *ldlm_lock_slab;
 int (*mds_reint_p)(int offset, struct ptlrpc_request *req) = NULL;
 int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req) = NULL;
@@ -45,6 +61,10 @@ void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
         lockh->cookie = lock->l_random;
 }
 
+/* 
+ *  HANDLES
+ */ 
+
 struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *handle)
 {
         struct ldlm_lock *lock = NULL;
@@ -71,6 +91,17 @@ struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *handle)
         return  lock;
 }
 
+/*
+ * REFCOUNTED LOCK OBJECTS
+ */ 
+
+
+/*  
+ * Lock refcounts, during creation: 
+ *   - one special one for allocation, dec'd only once in destroy
+ *   - one for being a lock that's in-use
+ *   - one for the addref associated with a new lock
+ */
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
 {
         l_lock(&lock->l_resource->lr_namespace->ns_lock);
@@ -126,12 +157,14 @@ void ldlm_lock_destroy(struct ldlm_lock *lock)
 
         if (lock->l_flags & LDLM_FL_DESTROYED) {
                 EXIT;
+                ldlm_lock_put(lock);
                 return;
         }
 
         lock->l_flags = LDLM_FL_DESTROYED;
         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
         ldlm_lock_put(lock);
+        ldlm_lock_put(lock);
         EXIT;
         return;
 }
@@ -158,6 +191,8 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
         get_random_bytes(&lock->l_random, sizeof(__u64));
 
         lock->l_resource = resource;
+        /* this refcount matches the one of the resource passed
+           in which is not being put away */
         lock->l_refc = 1;
         INIT_LIST_HEAD(&lock->l_children);
         INIT_LIST_HEAD(&lock->l_res_link);
@@ -169,12 +204,16 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                 list_add(&lock->l_childof, &parent->l_children);
                 l_unlock(&parent->l_resource->lr_namespace->ns_lock);
         }
+        /* this is the extra refcount, to prevent the lock
+           evaporating */ 
+        ldlm_lock_get(lock);
         RETURN(lock);
 }
 
 int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
 {
         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+        struct ldlm_resource *oldres = lock->l_resource;
         int type, i;
         ENTRY;
 
@@ -191,10 +230,12 @@ int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
         for (i = 0; i < lock->l_refc; i++) {
                 int rc;
                 ldlm_resource_getref(lock->l_resource);
-                rc = ldlm_resource_put(lock->l_resource);
+                rc = ldlm_resource_put(oldres);
                 if (rc == 1 && i != lock->l_refc - 1)
                         LBUG();
         }
+        /* compensate for the initial get above.. */
+        ldlm_resource_put(lock->l_resource);
 
         l_unlock(&ns->ns_lock);
         RETURN(0);
@@ -632,7 +673,6 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
         lock->l_req_mode = mode;
         lock->l_data = data;
         lock->l_data_len = data_len;
-        ldlm_lock_addref(lock, mode);
 
         return lock;
 }
@@ -650,19 +690,15 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_lock *lock,
         ENTRY;
 
         res = lock->l_resource;
-        local = res->lr_namespace->ns_client;
-
         lock->l_blocking_ast = blocking;
 
         if (res->lr_type == LDLM_EXTENT)
                 memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));
 
         /* policies are not executed on the client */
+        local = res->lr_namespace->ns_client;
         if (!local && (policy = ldlm_res_policy_table[res->lr_type])) {
                 int rc;
-
-                ldlm_resource_getref(res);
-
                 rc = policy(lock, cookie, lock->l_req_mode, NULL);
 
                 if (rc == ELDLM_LOCK_CHANGED) {
index 1d529a8..645d94c 100644 (file)
@@ -457,6 +457,8 @@ static void __exit ldlm_exit(void)
                 CERROR("couldn't free ldlm lock slab\n");
 }
 
+EXPORT_SYMBOL(ldlm_lockname);
+EXPORT_SYMBOL(ldlm_typename);
 EXPORT_SYMBOL(ldlm_lock_match);
 EXPORT_SYMBOL(ldlm_lock_addref);
 EXPORT_SYMBOL(ldlm_lock_decref);
index e09444e..aedff87 100644 (file)
@@ -38,6 +38,8 @@ int ldlm_cli_enqueue(struct ptlrpc_client *cl, struct ptlrpc_connection *conn,
                                 data, data_len);
         if (lock == NULL)
                 GOTO(out, rc = -ENOMEM);
+        /* for the local lock, add the reference */
+        ldlm_lock_addref(lock, mode);
         ldlm_lock2handle(lock, lockh);
 
         LDLM_DEBUG(lock, "client-side enqueue START");
index 5b5cb99..7d2288c 100644 (file)
@@ -326,12 +326,6 @@ void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
 }
 
-int ldlm_get_resource_handle(struct ldlm_resource *res, struct lustre_handle *h)
-{
-        LBUG();
-        return 0;
-}
-
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
 {
         desc->lr_type = res->lr_type;