/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <linux/obd.h>

#include "ldlm_internal.h"
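
/* Helpers for the completion-AST wait below: interrupted_completion_wait() is
 * the (intentionally empty) interrupt callback handed to l_wait_event(), and
 * lock_wait_data carries the lock and import generation to the timeout
 * handler. */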
static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        int               lwd_generation;
};
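
/* Timeout handler for the wait in ldlm_completion_ast().  For a lock with no
 * connection export (a server-side or local lock) it only dumps the namespace
 * occasionally and keeps sleeping; for a client lock it fails the import so
 * recovery can begin and reports which target is being recovered. */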
int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        if (lock->l_conn_export == NULL) {
                static unsigned long next_dump = 0;

                LDLM_ERROR(lock, "lock timed out; not entering recovery in "
                           "server code, just going back to sleep");
                if (time_after(jiffies, next_dump)) {
                        ldlm_namespace_dump(lock->l_resource->lr_namespace);
                        portals_debug_dumplog();
                        next_dump = jiffies + 300 * HZ;
                }
                RETURN(0);
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_generation);
        LDLM_ERROR(lock, "lock timed out, entering recovery for %s@%s",
                   imp->imp_target_uuid.uuid,
                   imp->imp_connection->c_remote_uuid.uuid);

        RETURN(0);
}
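
/* Client-side completion AST: called when an enqueue returns with the lock
 * still blocked.  Sleeps on l_waitq until the lock is granted (l_req_mode ==
 * l_granted_mode) or fails; if the wait times out the import is failed by
 * ldlm_expired_completion_wait() above and we keep waiting for recovery. */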
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        unsigned long irqflags;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        int rc = 0;
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC)
                goto noreproc;

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock, 0);
        ldlm_reprocess_all(lock->l_resource);

 noreproc:

        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;

        if (flags & LDLM_FL_NO_TIMEOUT) {
                LDLM_DEBUG(lock, "waiting indefinitely for group lock");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ,
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }

        if (imp != NULL) {
                spin_lock_irqsave(&imp->imp_lock, irqflags);
                lwd.lwd_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, irqflags);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           (lock->l_flags & LDLM_FL_FAILED)), &lwi);

        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}
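
/* Enqueue a lock without talking to a server: used from ldlm_cli_enqueue()
 * when no export is given.  The lock is created, referenced and enqueued
 * directly in the local namespace, and the completion AST is invoked here. */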
static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                                  struct ldlm_res_id res_id,
                                  __u32 type,
                                  ldlm_policy_data_t *policy,
                                  ldlm_mode_t mode,
                                  int *flags,
                                  ldlm_blocking_callback blocking,
                                  ldlm_completion_callback completion,
                                  ldlm_glimpse_callback glimpse,
                                  void *data, __u32 lvb_len,
                                  void *lvb_swabber,
                                  struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        if (ns->ns_client) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                completion, glimpse, data, lvb_len);
        if (!lock)
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock->l_flags |= LDLM_FL_LOCAL;
        lock->l_lvb_swabber = lvb_swabber;
        if (policy != NULL)
                memcpy(&lock->l_policy_data, policy, sizeof(*policy));
        if (type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &policy->l_extent,
                       sizeof(policy->l_extent));

        err = ldlm_lock_enqueue(ns, &lock, policy, flags);
        if (err != ELDLM_OK)
                GOTO(out, err);

        if (policy != NULL)
                memcpy(policy, &lock->l_policy_data, sizeof(*policy));
        if ((*flags) & LDLM_FL_LOCK_CHANGED)
                memcpy(&res_id, &lock->l_resource->lr_name, sizeof(res_id));

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}
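
/* Undo a client-side enqueue that cannot be completed: mark the lock
 * LDLM_FL_LOCAL_ONLY so no CANCEL RPC is sent, then drop the reference and
 * cancel it locally. */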
static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock,
                                struct lustre_handle *lockh, int mode)
{
        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        l_lock(&ns->ns_lock);
        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
        l_unlock(&ns->ns_lock);

        ldlm_lock_decref_and_cancel(lockh, mode);
}
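
/* Main client-side enqueue path.  Depending on the arguments this either
 * takes the purely local path (no export), replays an existing lock
 * (LDLM_FL_REPLAY), or creates a new lock, packs it into an LDLM_ENQUEUE
 * request (allocating one unless the caller passed a request in, e.g. for
 * intents), waits for the reply, and fixes up mode, resource name, policy
 * data and LVB according to what the server returned. */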
int ldlm_cli_enqueue(struct obd_export *exp,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct ldlm_res_id res_id,
                     __u32 type,
                     ldlm_policy_data_t *policy,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion,
                     ldlm_glimpse_callback glimpse,
                     void *data,
                     void *lvb,
                     __u32 lvb_len,
                     void *lvb_swabber,
                     struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size[3] = {0, sizeof(*body), lvb_len}, req_passed_in = 1;
        int is_replay = *flags & LDLM_FL_REPLAY;
        int cleanup_phase = 0;
        ENTRY;

        if (exp == NULL) {
                LASSERT(!is_replay);
                rc = ldlm_cli_enqueue_local(ns, res_id, type, policy, mode,
                                            flags, blocking, completion,
                                            glimpse, data, lvb_len, lvb_swabber,
                                            lockh);
                RETURN(rc);
        }

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                        completion, glimpse, data, lvb_len);
                if (lock == NULL)
                        RETURN(-ENOMEM);
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, mode);
                ldlm_lock2handle(lock, lockh);
                lock->l_lvb_swabber = lvb_swabber;
                if (policy != NULL)
                        memcpy(&lock->l_policy_data, policy, sizeof(*policy));
                if (type == LDLM_EXTENT)
                        memcpy(&lock->l_req_extent, &policy->l_extent,
                               sizeof(policy->l_extent));
                LDLM_DEBUG(lock, "client-side enqueue START");
        }

        /* lock not sent to server yet */
        cleanup_phase = 2;

        if (req == NULL) {
                req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
                                      LDLM_ENQUEUE, 2, size, NULL);
                if (req == NULL)
                        GOTO(cleanup, rc = -ENOMEM);
                req_passed_in = 0;
        } else {
                LASSERTF(req->rq_reqmsg->buflens[MDS_REQ_INTENT_LOCKREQ_OFF] ==
                         sizeof(*body), "buflen[%d] = %d, not %d\n",
                         MDS_REQ_INTENT_LOCKREQ_OFF,
                         req->rq_reqmsg->buflens[MDS_REQ_INTENT_LOCKREQ_OFF],
                         (int)sizeof(*body));
        }

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_INTENT_LOCKREQ_OFF,
                              sizeof(*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;

        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));

        /* Continue as normal. */
        if (!req_passed_in) {
                size[0] = sizeof(*reply);
                req->rq_replen = lustre_msg_size(1 + (lvb_len > 0), size);
        }
        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = blocking;

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);

        if (rc != ELDLM_OK) {
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                if (rc == ELDLM_LOCK_ABORTED) {
                        /* Before we return, swab the reply */
                        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                                   lustre_swab_ldlm_reply);
                        if (reply == NULL) {
                                CERROR("Can't unpack ldlm_reply\n");
                                rc = -EPROTO;
                        }
                        if (lvb_len) {
                                void *tmplvb;
                                tmplvb = lustre_swab_repbuf(req, 1, lvb_len,
                                                            lvb_swabber);
                                if (tmplvb == NULL)
                                        GOTO(cleanup, rc = -EPROTO);
                                if (lvb != NULL)
                                        memcpy(lvb, tmplvb, lvb_len);
                        }
                }
                GOTO(cleanup, rc);
        }

        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(cleanup, rc = -EPROTO);
        }

        /* XXX - Phil, wasn't sure if this should go before or after the
         * lustre_swab_repbuf() ? If we can't unpack the reply then we
         * don't know what occurred on the server so I think the safest
         * bet is to cleanup the lock as if it didn't make it ? */

        /* lock enqueued on the server */
        cleanup_phase = 1;

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        *flags = reply->lock_flags;

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
               lock, reply->lock_handle.cookie, *flags);

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;

                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (reply->lock_desc.l_resource.lr_name.name[0] !=
                    lock->l_resource->lr_name.name[0] ||
                    reply->lock_desc.l_resource.lr_name.name[1] !=
                    lock->l_resource->lr_name.name[1]) {
                        CDEBUG(D_INFO, "remote intent success, locking %ld "
                               "instead of %ld\n",
                              (long)reply->lock_desc.l_resource.lr_name.name[0],
                               (long)lock->l_resource->lr_name.name[0]);

                        ldlm_lock_change_resource(ns, lock,
                                           reply->lock_desc.l_resource.lr_name);
                        if (lock->l_resource == NULL) {
                                LBUG();
                                GOTO(cleanup, rc = -ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }

                memcpy(&lock->l_policy_data,
                       &reply->lock_desc.l_policy_data,
                       sizeof(reply->lock_desc.l_policy_data));
                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock,"client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT) {
                l_lock(&ns->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                l_unlock(&ns->ns_lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /* If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one. */
        if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
                void *tmplvb;
                tmplvb = lustre_swab_repbuf(req, 1, lvb_len, lvb_swabber);
                if (tmplvb == NULL)
                        GOTO(cleanup, rc = -EPROTO);
                memcpy(lock->l_lvb_data, tmplvb, lvb_len);
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);
                        if (err)
                                GOTO(cleanup, rc = err);
                }
        }

        if (lvb_len && lvb != NULL) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
 cleanup:
        switch (cleanup_phase) {
        case 2:
                if (rc)
                        failed_lock_cleanup(ns, lock, lockh, mode);
        case 1:
                if (!req_passed_in && req != NULL)
                        ptlrpc_req_finished(req);
        }

        LDLM_LOCK_PUT(lock);
        return rc;
}
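
/* Convert a lock that was never sent to a server: just change the mode in the
 * local namespace and reprocess the resource queues. */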
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        ENTRY;
        if (lock->l_resource->lr_namespace->ns_client) {
                CERROR("Trying to cancel local lock\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        ldlm_lock_convert(lock, new_mode, flags);
        ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}
/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int rc, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;

        if (lock->l_conn_export == NULL)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_exp2cliimp(lock->l_conn_export),
                              LUSTRE_DLM_VERSION, LDLM_CONVERT, 1, &size, NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR ("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);

        ldlm_reprocess_all(res);
        /* Go to sleep until the lock is granted. */
        /* FIXME: or cancelled. */
        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}
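
/* Cancel a lock from the client.  If the lock has a connection export the
 * cancel callback runs first and, unless the lock is LDLM_FL_LOCAL_ONLY or
 * the import is invalid, an LDLM_CANCEL RPC is sent before the lock is
 * cancelled locally; otherwise the lock is simply cancelled in the local
 * namespace. */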
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ptlrpc_request *req;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL)
                RETURN(0);

        if (lock->l_conn_export) {
                int local_only;
                struct obd_import *imp;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references*/
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags & LDLM_FL_LOCAL_ONLY);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                ldlm_cancel_callback(lock);

                if (local_only) {
                        CDEBUG(D_INFO, "not sending request (at caller's "
                               "instruction)\n");
                        goto local_cancel;
                }

        restart:
                imp = class_exp2cliimp(lock->l_conn_export);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_HA, "skipping cancel on invalid import %p\n",
                               imp);
                        goto local_cancel;
                }

                req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_CANCEL,
                                      1, &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);
                req->rq_no_resend = 1;

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
                memcpy(&body->lock_handle1, &lock->l_remote_handle,
                       sizeof(body->lock_handle1));

                req->rq_replen = lustre_msg_size(0, NULL);

                rc = ptlrpc_queue_wait(req);

                if (rc == ESTALE) {
                        char str[PTL_NALFMT_SIZE];
                        CERROR("client/server (nid %s) out of sync "
                               "-- not fatal\n",
                               ptlrpc_peernid2str(&req->rq_import->
                                                  imp_connection->c_peer, str));
                } else if (rc == -ETIMEDOUT) {
                        ptlrpc_req_finished(req);
                        GOTO(restart, rc);
                } else if (rc != ELDLM_OK) {
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);
                }

                ptlrpc_req_finished(req);
        local_cancel:
                ldlm_lock_cancel(lock);
        } else {
                if (lock->l_resource->lr_namespace->ns_client) {
                        LDLM_ERROR(lock, "Trying to cancel local lock");
                        LBUG();
                }
                LDLM_DEBUG(lock, "client-side local cancel");
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "client-side local cancel handler END");
        }

 out:
        LDLM_LOCK_PUT(lock);
        RETURN(rc);
}
/* when called with LDLM_ASYNC the blocking callback will be handled
 * in a thread and this function will return after the thread has been
 * asked to call the callback.  when called with LDLM_SYNC the blocking
 * callback will be performed in this function. */
int ldlm_cancel_lru(struct ldlm_namespace *ns, ldlm_sync_t sync)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int count, rc = 0;
        LIST_HEAD(cblist);
        ENTRY;

        l_lock(&ns->ns_lock);
        count = ns->ns_nr_unused - ns->ns_max_unused;
        if (count <= 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        list_for_each_safe(tmp, next, &ns->ns_unused_list) {
                lock = list_entry(tmp, struct ldlm_lock, l_lru);

                LASSERT(!lock->l_readers && !lock->l_writers);

                /* Setting the CBPENDING flag is a little misleading, but
                 * prevents an important race; namely, once CBPENDING is set,
                 * the lock can accumulate no more readers/writers.  Since
                 * readers and writers are already zero here, ldlm_lock_decref
                 * won't see this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING;

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                if (sync == LDLM_ASYNC)
                        ldlm_bl_to_thread(ns, NULL, lock);
                else
                        list_add(&lock->l_lru, &cblist);

                if (--count == 0)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &cblist) {
                lock = list_entry(tmp, struct ldlm_lock, l_lru);
                list_del_init(&lock->l_lru);
                ldlm_handle_bl_callback(ns, NULL, lock);
        }
        RETURN(rc);
}
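
/* Cancel all unused (no readers or writers) locks on one resource, skipping
 * locks whose l_ast_data does not match the caller's opaque cookie.  With
 * LDLM_FL_LOCAL_ONLY the locks are dropped without sending CANCEL RPCs. */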
static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                           struct ldlm_res_id res_id, int flags,
                                           void *opaque)
{
        struct ldlm_resource *res;
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        struct ldlm_ast_work *w;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id.name[0]);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (opaque != NULL && lock->l_ast_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                   lock->l_ast_data, opaque);
                        continue;
                }

                if (lock->l_readers || lock->l_writers) {
                        if (flags & LDLM_FL_CONFIG_CHANGE)
                                lock->l_flags |= LDLM_FL_CBPENDING;
                        else if (flags & LDLM_FL_WARN)
                                LDLM_ERROR(lock, "lock in use");
                        continue;
                }

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);

                list_add(&w->w_list, &list);
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        ldlm_lock_cancel(w->w_lock);
                } else {
                        ldlm_lock2handle(w->w_lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc != ELDLM_OK)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                }
                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        ldlm_resource_putref(res);

        RETURN(0);
}
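
/* Wait condition used below: true once the namespace has no resources left. */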
static inline int have_no_nsresource(struct ldlm_namespace *ns)
{
        int no_resource = 0;

        spin_lock(&ns->ns_counter_lock);
        if (ns->ns_resources == 0)
                no_resource = 1;
        spin_unlock(&ns->ns_counter_lock);

        RETURN(no_resource);
}
/* Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server.
 * If flags & LDLM_FL_NO_CALLBACK, don't run the cancel callback.
 * If flags & LDLM_FL_WARN, print a warning if some locks are still in use.
 * If flags & LDLM_FL_CONFIG_CHANGE, mark all locks as having a pending callback
 */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id, int flags, void *opaque)
{
        int i;
        struct l_wait_info lwi = { 0 };
        ENTRY;

        if (ns == NULL)
                RETURN(ELDLM_OK);

        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, *res_id, flags,
                                                       opaque));

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        int rc;
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);
                        l_unlock(&ns->ns_lock);

                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags, opaque);
                        if (rc)
                                CERROR("cancel_unused_res ("LPU64"): %d\n",
                                       res->lr_name.name[0], rc);

                        l_lock(&ns->ns_lock);
                        ldlm_resource_putref(res);
                }
        }
        l_unlock(&ns->ns_lock);
        if (flags & LDLM_FL_CONFIG_CHANGE)
                l_wait_event(ns->ns_waitq, have_no_nsresource(ns), &lwi);

        RETURN(ELDLM_OK);
}
/* Lock iterators. */
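
/* Apply 'iter' to every lock on the resource's granted, converting and
 * waiting lists, stopping early if the iterator returns LDLM_ITER_STOP. */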
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int rc = LDLM_ITER_CONTINUE;
        struct ldlm_namespace *ns = res->lr_namespace;
        ENTRY;

        if (!res)
                RETURN(LDLM_ITER_CONTINUE);

        l_lock(&ns->ns_lock);
        list_for_each_safe(tmp, next, &res->lr_granted) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_converting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_waiting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}
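
/* Glue so a per-lock iterator can be driven by the per-resource walker below:
 * ldlm_namespace_foreach() wraps the caller's iterator in iter_helper_data
 * and hands it to ldlm_namespace_foreach_res(). */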
struct iter_helper_data {
        ldlm_iterator_t iter;
        void *closure;
};

static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
        struct iter_helper_data *helper = closure;
        return helper->iter(lock, helper->closure);
}

static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
{
        return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
}

int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure)
{
        struct iter_helper_data helper = { iter: iter, closure: closure };
        return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
}
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure)
{
        int i, rc = LDLM_ITER_CONTINUE;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        rc = iter(res, closure);
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}
/* non-blocking function to manipulate a lock whose cb_data is being put away.*/
void ldlm_change_cbdata(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
                        ldlm_iterator_t iter, void *data)
{
        struct ldlm_resource *res;
        ENTRY;

        if (ns == NULL) {
                CERROR("must pass in namespace\n");
                LBUG();
        }

        res = ldlm_resource_get(ns, NULL, *res_id, 0, 0);
        if (res == NULL) {
                EXIT;
                return;
        }

        l_lock(&ns->ns_lock);
        ldlm_resource_foreach(res, iter, data);
        l_unlock(&ns->ns_lock);
        ldlm_resource_putref(res);
        EXIT;
}
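
/* Lock replay: after a failover the client re-sends ENQUEUEs for the locks it
 * holds.  The helpers below chain every lock in the namespace onto a list and
 * replay them one RPC at a time through ptlrpcd. */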
static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
        struct list_head *list = closure;

        /* we use l_pending_chain here, because it's unused on clients. */
        list_add(&lock->l_pending_chain, list);
        return LDLM_ITER_CONTINUE;
}
static int replay_lock_interpret(struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;

        atomic_dec(&req->rq_import->imp_replay_inflight);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        lock = req->rq_async_args.pointer_arg[0];
        LASSERT(lock != NULL);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "replayed lock:");
        ptlrpc_import_recovery_state_machine(req->rq_import);
 out:
        RETURN(rc);
}
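
/* Re-send one lock to the server with LDLM_FL_REPLAY set, choosing the
 * BLOCK_GRANTED/CONV/WAIT flag from the lock's current disposition (see the
 * comment inside), and let replay_lock_interpret() record the new remote
 * handle when the reply arrives. */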
static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int buffers = 1;
        int size[2] = {0, sizeof(*body)};
        int flags;

        /*
         * If granted mode matches the requested mode, this lock is granted.
         *
         * If they differ, but we have a granted mode, then we were granted
         * one mode and now want another: ergo, converting.
         *
         * If we haven't been granted anything and are on a resource list,
         * then we're blocked/waiting.
         *
         * If we haven't been granted anything and we're NOT on a resource list,
         * then we haven't got a reply yet and don't have a known disposition.
         * This happens whenever a lock enqueue is the request that triggers
         * recovery.
         */
        if (lock->l_granted_mode == lock->l_req_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
        else if (lock->l_granted_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
        else if (!list_empty(&lock->l_res_link))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
        else
                flags = LDLM_FL_REPLAY;

        req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                              2, size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        /* We're part of recovery, so don't wait for it. */
        req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;

        body = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = flags;

        ldlm_lock2handle(lock, &body->lock_handle1);
        size[0] = sizeof(*reply);
        if (lock->l_lvb_len != 0) {
                buffers = 2;
                size[1] = lock->l_lvb_len;
        }
        req->rq_replen = lustre_msg_size(buffers, size);

        LDLM_DEBUG(lock, "replaying lock:");

        atomic_inc(&req->rq_import->imp_replay_inflight);
        req->rq_async_args.pointer_arg[0] = lock;
        req->rq_interpret_reply = replay_lock_interpret;
        ptlrpcd_add_req(req);

        RETURN(0);
}
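
/* Replay every lock in the import's namespace.  imp_replay_inflight is held
 * above zero while the replays are being queued so recovery cannot complete
 * before the last reply has been interpreted. */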
int ldlm_replay_locks(struct obd_import *imp)
{
        struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
        struct list_head list, *pos, *next;
        struct ldlm_lock *lock;
        int rc = 0;
        ENTRY;

        INIT_LIST_HEAD(&list);

        LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
        LASSERT(ns != NULL);

        /* ensure this doesn't fall to 0 before all have been queued */
        atomic_inc(&imp->imp_replay_inflight);

        l_lock(&ns->ns_lock);
        (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);

        list_for_each_safe(pos, next, &list) {
                lock = list_entry(pos, struct ldlm_lock, l_pending_chain);
                rc = replay_one_lock(imp, lock);
                if (rc)
                        break; /* or try to do the rest? */
        }
        l_unlock(&ns->ns_lock);

        atomic_dec(&imp->imp_replay_inflight);

        RETURN(rc);
}