Add flock support.

diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index f6045f8..ec5f8d4 100644
@@ -106,8 +106,15 @@ noreproc:
 
         lwd.lwd_lock = lock;
 
-        lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, ldlm_expired_completion_wait,
-                               interrupted_completion_wait, &lwd);
+        if (flags & LDLM_FL_NO_TIMEOUT) {
+                LDLM_DEBUG(lock, "waiting indefinitely for group lock\n");
+                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
+        } else {
+                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ,
+                                       ldlm_expired_completion_wait,
+                                       interrupted_completion_wait, &lwd);
+        }
+
         if (imp != NULL) {
                 spin_lock_irqsave(&imp->imp_lock, irqflags);
                 lwd.lwd_generation = imp->imp_generation;
@@ -117,9 +124,9 @@ noreproc:
         /* Go to sleep until the lock is granted or cancelled. */
         rc = l_wait_event(lock->l_waitq,
                           ((lock->l_req_mode == lock->l_granted_mode) ||
-                           (lock->l_flags & LDLM_FL_CANCEL)), &lwi);
+                           (lock->l_flags & LDLM_FL_FAILED)), &lwi);
 
-        if (lock->l_destroyed) {
+        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                 RETURN(-EIO);
         }
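
Note: the hunk above makes LDLM_FL_NO_TIMEOUT (group lock) enqueues wait indefinitely but interruptibly, while all other locks keep the obd_timeout-bounded wait. A minimal user-space sketch of the two wait policies; the names below (wait_info, choose_wait, OBD_TIMEOUT) are illustrative stand-ins, not the real Lustre LWI macros:

    #include <stdbool.h>
    #include <time.h>

    #define OBD_TIMEOUT 100                 /* stand-in for obd_timeout */

    struct wait_info {
            time_t deadline;                /* 0 means wait indefinitely */
    };

    /* LWI_INTR()-style info for no-timeout locks, LWI_TIMEOUT_INTR()-style
     * info for everything else. */
    static struct wait_info choose_wait(bool no_timeout)
    {
            struct wait_info lwi = { .deadline = 0 };

            if (!no_timeout)
                    lwi.deadline = time(NULL) + OBD_TIMEOUT;
            return lwi;
    }

    /* Poll until cond() holds; a real waitqueue would sleep instead, and a
     * signal would end the wait early in both modes. */
    static int wait_event_sketch(bool (*cond)(void), struct wait_info lwi)
    {
            while (!cond())
                    if (lwi.deadline != 0 && time(NULL) >= lwi.deadline)
                            return -1;      /* expired-completion path */
            return 0;                       /* caller rechecks lock flags */
    }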
@@ -168,6 +175,9 @@ static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
         lock->l_lvb_swabber = lvb_swabber;
         if (policy != NULL)
                 memcpy(&lock->l_policy_data, policy, sizeof(*policy));
+        if (type == LDLM_EXTENT)
+                memcpy(&lock->l_req_extent, &policy->l_extent,
+                       sizeof(policy->l_extent));
 
         err = ldlm_lock_enqueue(ns, &lock, policy, flags);
         if (err != ELDLM_OK)
@@ -227,6 +237,7 @@ int ldlm_cli_enqueue(struct obd_export *exp,
         struct ldlm_reply *reply;
         int rc, size[2] = {sizeof(*body), lvb_len}, req_passed_in = 1;
         int is_replay = *flags & LDLM_FL_REPLAY;
+        int cleanup_phase = 0;
         ENTRY;
 
         if (exp == NULL) {
@@ -248,21 +259,27 @@ int ldlm_cli_enqueue(struct obd_export *exp,
                 lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                         completion, glimpse, data, lvb_len);
                 if (lock == NULL)
-                        GOTO(out_nolock, rc = -ENOMEM);
+                        RETURN(-ENOMEM);
                 /* for the local lock, add the reference */
                 ldlm_lock_addref_internal(lock, mode);
                 ldlm_lock2handle(lock, lockh);
                 lock->l_lvb_swabber = lvb_swabber;
                 if (policy != NULL)
                         memcpy(&lock->l_policy_data, policy, sizeof(*policy));
+                if (type == LDLM_EXTENT)
+                        memcpy(&lock->l_req_extent, &policy->l_extent,
+                               sizeof(policy->l_extent));
                 LDLM_DEBUG(lock, "client-side enqueue START");
         }
 
+        /* lock not sent to server yet */
+        cleanup_phase = 2;
+
         if (req == NULL) {
                 req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 1,
                                       size, NULL);
                 if (req == NULL)
-                        GOTO(out_lock, rc = -ENOMEM);
+                        GOTO(cleanup, rc = -ENOMEM);
                 req_passed_in = 0;
         } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
                 LBUG();
@@ -306,38 +323,35 @@ int ldlm_cli_enqueue(struct obd_export *exp,
                                 tmplvb = lustre_swab_repbuf(req, 1, lvb_len,
                                                             lvb_swabber);
                                 if (tmplvb == NULL)
-                                        GOTO(out_lock, rc = -EPROTO);
+                                        GOTO(cleanup, rc = -EPROTO);
                                 if (lvb != NULL)
                                         memcpy(lvb, tmplvb, lvb_len);
                         }
                 }
-                GOTO(out_lock, rc);
+                GOTO(cleanup, rc);
         }
 
         reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                    lustre_swab_ldlm_reply);
         if (reply == NULL) {
                 CERROR("Can't unpack ldlm_reply\n");
-                GOTO(out_lock, rc = -EPROTO);
+                GOTO(cleanup, rc = -EPROTO);
         }
 
+        /* XXX - Phil, wasn't sure if this should go before or after the
+         * lustre_swab_repbuf()?  If we can't unpack the reply then we
+         * don't know what occurred on the server, so I think the safest
+         * bet is to clean up the lock as if it didn't make it. */
+
+        /* lock enqueued on the server */
+        cleanup_phase = 1;
+
         memcpy(&lock->l_remote_handle, &reply->lock_handle,
                sizeof(lock->l_remote_handle));
         *flags = reply->lock_flags;
 
         CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
                lock, reply->lock_handle.cookie, *flags);
-        if (type == LDLM_EXTENT) {
-                CDEBUG(D_INFO, "requested extent: "LPU64" -> "LPU64", got "
-                       "extent "LPU64" -> "LPU64"\n",
-                       body->lock_desc.l_policy_data.l_extent.start,
-                       body->lock_desc.l_policy_data.l_extent.end,
-                       reply->lock_desc.l_policy_data.l_extent.start,
-                       reply->lock_desc.l_policy_data.l_extent.end);
-        }
-        if (policy != NULL)
-                memcpy(&lock->l_policy_data, &reply->lock_desc.l_policy_data,
-                       sizeof(reply->lock_desc.l_policy_data));
 
         /* If enqueue returned a blocked lock but the completion handler has
          * already run, then it fixed up the resource and we don't need to do it
@@ -352,7 +366,9 @@ int ldlm_cli_enqueue(struct obd_export *exp,
                 }
 
                 if (reply->lock_desc.l_resource.lr_name.name[0] !=
-                    lock->l_resource->lr_name.name[0]) {
+                    lock->l_resource->lr_name.name[0] ||
+                   reply->lock_desc.l_resource.lr_name.name[1] !=
+                    lock->l_resource->lr_name.name[1]) {
                         CDEBUG(D_INFO, "remote intent success, locking %ld "
                                "instead of %ld\n",
                               (long)reply->lock_desc.l_resource.lr_name.name[0],
@@ -362,11 +378,18 @@ int ldlm_cli_enqueue(struct obd_export *exp,
                                            reply->lock_desc.l_resource.lr_name);
                         if (lock->l_resource == NULL) {
                                 LBUG();
-                                GOTO(out_lock, rc = -ENOMEM);
+                                GOTO(cleanup, rc = -ENOMEM);
                         }
                         LDLM_DEBUG(lock, "client-side enqueue, new resource");
                 }
+                if (policy != NULL)
+                        memcpy(&lock->l_policy_data,
+                               &reply->lock_desc.l_policy_data,
+                               sizeof(reply->lock_desc.l_policy_data));
+                if (type != LDLM_PLAIN)
+                        LDLM_DEBUG(lock,"client-side enqueue, new policy data");
         }
+
         if ((*flags) & LDLM_FL_AST_SENT) {
                 l_lock(&ns->ns_lock);
                 lock->l_flags |= LDLM_FL_CBPENDING;
@@ -374,11 +397,13 @@ int ldlm_cli_enqueue(struct obd_export *exp,
                 LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
         }
 
-        if (lvb_len) {
+        /* If the lock has already been granted by a completion AST, don't
+         * clobber the LVB with an older one. */
+        if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
                 void *tmplvb;
                 tmplvb = lustre_swab_repbuf(req, 1, lvb_len, lvb_swabber);
                 if (tmplvb == NULL)
-                        GOTO(out_lock, rc = -EPROTO);
+                        GOTO(cleanup, rc = -EPROTO);
                 memcpy(lock->l_lvb_data, tmplvb, lvb_len);
         }
 
@@ -399,13 +424,18 @@ int ldlm_cli_enqueue(struct obd_export *exp,
 
         LDLM_DEBUG(lock, "client-side enqueue END");
         EXIT;
- out_lock:
-        if (rc)
-                failed_lock_cleanup(ns, lock, lockh, mode);
-        if (!req_passed_in && req != NULL)
-                ptlrpc_req_finished(req);
+cleanup:
+        switch (cleanup_phase) {
+        case 2:
+                if (rc)
+                        failed_lock_cleanup(ns, lock, lockh, mode);
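+                /* fall through */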
+        case 1:
+                if (!req_passed_in && req != NULL)
+                        ptlrpc_req_finished(req);
+        }
+
         LDLM_LOCK_PUT(lock);
- out_nolock:
         return rc;
 }
 
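Note: the two-label error path (out_lock/out_nolock) becomes a single cleanup label driven by a phase counter. A compact sketch of the idiom in plain C, with hypothetical resources standing in for the lock and the ptlrpc request; as in the patch, the cases fall through so an early failure (phase 2) also runs the request teardown in case 1:

    #include <errno.h>
    #include <stdlib.h>

    /* Returns 0 and hands the "lock" back via lockp, or -errno on failure. */
    static int do_enqueue(char **lockp)
    {
            int cleanup_phase = 0, rc = 0;
            char *lock, *req = NULL;

            lock = malloc(16);              /* like ldlm_lock_create() */
            if (lock == NULL)
                    return -ENOMEM;

            cleanup_phase = 2;              /* lock exists, not sent to server */

            req = malloc(32);               /* like ptlrpc_prep_req() */
            if (req == NULL) {
                    rc = -ENOMEM;
                    goto cleanup;
            }

            /* ... send request, unpack reply; failures goto cleanup ... */

            cleanup_phase = 1;              /* lock enqueued on the server */

    cleanup:
            switch (cleanup_phase) {
            case 2:
                    if (rc)
                            free(lock);     /* like failed_lock_cleanup() */
                    /* fall through */
            case 1:
                    free(req);              /* like ptlrpc_req_finished() */
            }
            if (rc == 0)
                    *lockp = lock;
            return rc;
    }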
@@ -548,9 +577,11 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
                 rc = ptlrpc_queue_wait(req);
 
                 if (rc == ESTALE) {
-                        CERROR("client/server (nid "LPU64") out of sync--not "
-                               "fatal\n",
-                               req->rq_import->imp_connection->c_peer.peer_nid);
+                        char str[PTL_NALFMT_SIZE];
+                        CERROR("client/server (nid %s) out of sync"
+                               " -- not fatal\n",
+                               ptlrpc_peernid2str(&req->rq_import->
+                                                  imp_connection->c_peer, str));
                 } else if (rc == -ETIMEDOUT) {
                         ptlrpc_req_finished(req);
                         GOTO(restart, rc);
@@ -579,11 +610,16 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
         return rc;
 }
 
-int ldlm_cancel_lru(struct ldlm_namespace *ns)
+/* When called with LDLM_ASYNC, the blocking callback will be handled
+ * in a thread and this function will return after the thread has been
+ * asked to call the callback.  When called with LDLM_SYNC, the blocking
+ * callback will be performed in this function. */
+int ldlm_cancel_lru(struct ldlm_namespace *ns, ldlm_sync_t sync)
 {
-        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
+        struct list_head *tmp, *next;
+        struct ldlm_lock *lock;
         int count, rc = 0;
-        struct ldlm_ast_work *w;
+        LIST_HEAD(cblist);
         ENTRY;
 
         l_lock(&ns->ns_lock);
@@ -595,7 +631,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns)
         }
 
         list_for_each_safe(tmp, next, &ns->ns_unused_list) {
-                struct ldlm_lock *lock;
+
                 lock = list_entry(tmp, struct ldlm_lock, l_lru);
 
                 LASSERT(!lock->l_readers && !lock->l_writers);
@@ -607,33 +643,23 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns)
                  * won't see this flag and call l_blocking_ast */
                 lock->l_flags |= LDLM_FL_CBPENDING;
 
-                OBD_ALLOC(w, sizeof(*w));
-                LASSERT(w);
-
-                w->w_lock = LDLM_LOCK_GET(lock);
-                list_add(&w->w_list, &list);
+                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                 ldlm_lock_remove_from_lru(lock);
+                if (sync == LDLM_ASYNC)
+                        ldlm_bl_to_thread(ns, NULL, lock);
+                else
+                        list_add(&lock->l_lru, &cblist);
 
                 if (--count == 0)
                         break;
         }
         l_unlock(&ns->ns_lock);
 
-        list_for_each_safe(tmp, next, &list) {
-                struct lustre_handle lockh;
-                int rc;
-                w = list_entry(tmp, struct ldlm_ast_work, w_list);
-
-                ldlm_lock2handle(w->w_lock, &lockh);
-                rc = ldlm_cli_cancel(&lockh);
-                if (rc != ELDLM_OK)
-                        CDEBUG(D_INFO, "ldlm_cli_cancel: %d\n", rc);
-
-                list_del(&w->w_list);
-                LDLM_LOCK_PUT(w->w_lock);
-                OBD_FREE(w, sizeof(*w));
+        list_for_each_safe(tmp, next, &cblist) {
+                lock = list_entry(tmp, struct ldlm_lock, l_lru);
+                list_del_init(&lock->l_lru);
+                ldlm_handle_bl_callback(ns, NULL, lock);
         }
-
         RETURN(rc);
 }
 
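Note: ldlm_cancel_lru() now takes a sync mode instead of always building an ldlm_ast_work list. A user-space sketch of the split (stand-in types and helpers, not the Lustre API): victims are picked under the namespace lock, but the blocking callbacks run either in a worker thread (async) or inline only after the lock has been dropped (sync):

    #include <pthread.h>
    #include <stddef.h>

    typedef enum { LRU_SYNC, LRU_ASYNC } lru_sync_t;

    struct lk {
            struct lk *next;
    };

    static pthread_mutex_t ns_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct lk *unused_list;          /* LRU, protected by ns_lock */

    static void hand_to_bl_thread(struct lk *l) { (void)l; /* queue to worker */ }
    static void handle_bl_callback(struct lk *l) { (void)l; /* run callback */ }

    static void cancel_lru(lru_sync_t sync, int count)
    {
            struct lk *l, *cblist = NULL;

            pthread_mutex_lock(&ns_lock);
            while (count-- > 0 && (l = unused_list) != NULL) {
                    unused_list = l->next;          /* remove from LRU */
                    if (sync == LRU_ASYNC) {
                            hand_to_bl_thread(l);   /* like ldlm_bl_to_thread() */
                    } else {
                            l->next = cblist;       /* park on a private list */
                            cblist = l;
                    }
            }
            pthread_mutex_unlock(&ns_lock);

            /* ns_lock dropped: safe to run blocking callbacks inline. */
            while ((l = cblist) != NULL) {
                    cblist = l->next;
                    handle_bl_callback(l);  /* like ldlm_handle_bl_callback() */
            }
    }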
@@ -661,15 +687,14 @@ static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                 if (opaque != NULL && lock->l_ast_data != opaque) {
                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                    lock->l_ast_data, opaque);
-                        //LBUG();
                         continue;
                 }
 
                 if (lock->l_readers || lock->l_writers) {
-                        if (flags & LDLM_FL_WARN) {
+                        if (flags & LDLM_FL_CONFIG_CHANGE)
+                                lock->l_flags |= LDLM_FL_CBPENDING;
+                        else if (flags & LDLM_FL_WARN)
                                 LDLM_ERROR(lock, "lock in use");
-                                //LBUG();
-                        }
                         continue;
                 }
 
@@ -681,11 +706,6 @@ static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 
                 w->w_lock = LDLM_LOCK_GET(lock);
 
-                /* Prevent the cancel callback from being called by setting
-                 * LDLM_FL_CANCEL in the lock.  Very sneaky. -p */
-                if (flags & LDLM_FL_NO_CALLBACK)
-                        w->w_lock->l_flags |= LDLM_FL_CANCEL;
-
                 list_add(&w->w_list, &list);
         }
         l_unlock(&ns->ns_lock);
@@ -713,17 +733,32 @@ static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
         RETURN(0);
 }
 
+static inline int have_no_nsresource(struct ldlm_namespace *ns)
+{
+        int no_resource = 0;
+
+        spin_lock(&ns->ns_counter_lock);
+        if (ns->ns_resources == 0)
+                no_resource = 1;
+        spin_unlock(&ns->ns_counter_lock);
+
+        RETURN(no_resource);
+}
+
 /* Cancel all locks on a namespace (or a specific resource, if given)
  * that have 0 readers/writers.
  *
  * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
  * to notify the server.
  * If flags & LDLM_FL_NO_CALLBACK, don't run the cancel callback.
- * If flags & LDLM_FL_WARN, print a warning if some locks are still in use. */
+ * If flags & LDLM_FL_WARN, print a warning if some locks are still in use.
+ * If flags & LDLM_FL_CONFIG_CHANGE, mark all locks as having a pending
+ * callback. */
 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                            struct ldlm_res_id *res_id, int flags, void *opaque)
 {
         int i;
+        struct l_wait_info lwi = { 0 };
         ENTRY;
 
         if (ns == NULL)
@@ -735,23 +770,28 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
 
         l_lock(&ns->ns_lock);
         for (i = 0; i < RES_HASH_SIZE; i++) {
-                struct list_head *tmp, *pos;
-                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
+                struct list_head *tmp, *next;
+                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                         int rc;
                         struct ldlm_resource *res;
                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
                         ldlm_resource_getref(res);
+                        l_unlock(&ns->ns_lock);
 
                         rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                              flags, opaque);
-
                         if (rc)
                                 CERROR("cancel_unused_res ("LPU64"): %d\n",
                                        res->lr_name.name[0], rc);
+
+                        l_lock(&ns->ns_lock);
+                        next = tmp->next;
                         ldlm_resource_putref(res);
                 }
         }
         l_unlock(&ns->ns_lock);
+        if (flags & LDLM_FL_CONFIG_CHANGE)
+                l_wait_event(ns->ns_waitq, have_no_nsresource(ns), &lwi);
 
         RETURN(ELDLM_OK);
 }
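
Note: the LDLM_FL_CONFIG_CHANGE path above waits on ns_waitq until have_no_nsresource() reports an empty namespace. A pthreads sketch of that drain pattern (stand-in names; the real code uses l_wait_event() and the namespace counter spinlock):

    #include <pthread.h>

    struct ns {
            pthread_mutex_t counter_lock;   /* like ns_counter_lock */
            pthread_cond_t  waitq;          /* like ns_waitq */
            int             resources;      /* like ns_resources */
    };

    /* Predicate checked under the counter lock, as in have_no_nsresource(). */
    static int ns_drained(struct ns *ns)
    {
            int empty;

            pthread_mutex_lock(&ns->counter_lock);
            empty = (ns->resources == 0);
            pthread_mutex_unlock(&ns->counter_lock);
            return empty;
    }

    /* Teardown paths call this as each resource goes away. */
    static void put_resource(struct ns *ns)
    {
            pthread_mutex_lock(&ns->counter_lock);
            if (--ns->resources == 0)
                    pthread_cond_broadcast(&ns->waitq);
            pthread_mutex_unlock(&ns->counter_lock);
    }

    /* Equivalent of l_wait_event(ns_waitq, have_no_nsresource(ns), &lwi). */
    static void wait_for_drain(struct ns *ns)
    {
            pthread_mutex_lock(&ns->counter_lock);
            while (ns->resources != 0)
                    pthread_cond_wait(&ns->waitq, &ns->counter_lock);
            pthread_mutex_unlock(&ns->counter_lock);
    }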
@@ -913,7 +953,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         struct ptlrpc_request *req;
         struct ldlm_request *body;
         struct ldlm_reply *reply;
-        int size;
+        int buffers = 1;
+        int size[2];
         int flags;
 
         /*
@@ -939,8 +980,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         else
                 flags = LDLM_FL_REPLAY;
 
-        size = sizeof(*body);
-        req = ptlrpc_prep_req(imp, LDLM_ENQUEUE, 1, &size, NULL);
+        size[0] = sizeof(*body);
+        req = ptlrpc_prep_req(imp, LDLM_ENQUEUE, 1, size, NULL);
         if (!req)
                 RETURN(-ENOMEM);
 
@@ -952,8 +993,12 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         body->lock_flags = flags;
 
         ldlm_lock2handle(lock, &body->lock_handle1);
-        size = sizeof(*reply);
-        req->rq_replen = lustre_msg_size(1, &size);
+        size[0] = sizeof(*reply);
+        if (lock->l_lvb_len != 0) {
+                buffers = 2;
+                size[1] = lock->l_lvb_len;
+        }
+        req->rq_replen = lustre_msg_size(buffers, size);
 
         LDLM_DEBUG(lock, "replaying lock:");
 
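Note: replay_one_lock() previously sized the reply for a single ldlm_reply buffer; with LVBs it must reserve a second buffer of l_lvb_len bytes. A rough stand-in for that sizing (the header size and 8-byte rounding here are assumptions, not the real lustre_msg layout):

    #include <stddef.h>

    #define MSG_ROUND(len) (((size_t)(len) + 7) & ~(size_t)7)

    /* Header plus each buffer length, rounded; stand-in for lustre_msg_size(). */
    static size_t msg_size(int buffers, const int *lens)
    {
            size_t total = MSG_ROUND(32 + 4 * (size_t)buffers); /* fake header */
            int i;

            for (i = 0; i < buffers; i++)
                    total += MSG_ROUND(lens[i]);
            return total;
    }

    /* Mirror of the replay sizing above: one buffer for the ldlm_reply,
     * plus an optional LVB buffer when the lock carries one. */
    static size_t replay_replen(int reply_size, int lvb_len)
    {
            int size[2] = { reply_size, lvb_len };
            int buffers = (lvb_len != 0) ? 2 : 1;

            return msg_size(buffers, size);
    }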
@@ -976,6 +1021,7 @@ int ldlm_replay_locks(struct obd_import *imp)
         INIT_LIST_HEAD(&list);
 
         LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+        LASSERT(ns != NULL);
 
         /* ensure this doesn't fall to 0 before all have been queued */
         atomic_inc(&imp->imp_replay_inflight);