/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <linux/obd.h>

#include "ldlm_internal.h"

static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32             lwd_generation;
};
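
/* Timeout handler passed to LWI_TIMEOUT_INTR() in ldlm_completion_ast()
 * below: invoked when an enqueue has waited longer than obd_timeout.  On a
 * server-side lock (no connection export) it only dumps debugging state and
 * keeps sleeping; on a client lock it kicks the import into recovery via
 * ptlrpc_fail_import(). */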
int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        if (lock->l_conn_export == NULL) {
                static unsigned long next_dump = 0;

                LDLM_ERROR(lock, "lock timed out; not entering recovery in "
                           "server code, just going back to sleep");
                if (time_after(jiffies, next_dump)) {
                        ldlm_namespace_dump(lock->l_resource->lr_namespace);
                        portals_debug_dumplog();
                        next_dump = jiffies + 300 * HZ;
                }
                RETURN(0);
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_generation);
        LDLM_ERROR(lock, "lock timed out, entering recovery for %s@%s",
                   imp->imp_target_uuid.uuid,
                   imp->imp_connection->c_remote_uuid.uuid);

        RETURN(0);
}
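
/* Default client-side completion AST: block the caller until the lock is
 * granted (l_req_mode == l_granted_mode) or cancelled.  When called with
 * flags == LDLM_FL_WAIT_NOREPROC it skips the blocked-lock debugging and
 * resource reprocessing and goes straight to the wait. */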
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        unsigned long irqflags;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        int rc = 0;
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC)
                goto noreproc;

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock, 0);
        ldlm_reprocess_all(lock->l_resource);

 noreproc:
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;
        lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, ldlm_expired_completion_wait,
                               interrupted_completion_wait, &lwd);
        if (imp != NULL) {
                spin_lock_irqsave(&imp->imp_lock, irqflags);
                lwd.lwd_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, irqflags);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           (lock->l_flags & LDLM_FL_CANCEL)), &lwi);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}
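
/* Enqueue a lock on a local (server-side) namespace; no RPC is involved.
 * ldlm_cli_enqueue() below falls through to this path when it is called
 * without an export to a remote server. */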
static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                                  struct ldlm_res_id res_id,
                                  __u32 type,
                                  ldlm_policy_data_t *policy,
                                  ldlm_mode_t mode,
                                  int *flags,
                                  ldlm_blocking_callback blocking,
                                  ldlm_completion_callback completion,
                                  ldlm_glimpse_callback glimpse,
                                  void *data, __u32 lvb_len,
                                  void *lvb_swabber,
                                  struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;

        if (ns->ns_client) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                completion, glimpse, data, lvb_len);
        if (!lock)
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock->l_flags |= LDLM_FL_LOCAL;
        lock->l_lvb_swabber = lvb_swabber;
        if (policy != NULL)
                memcpy(&lock->l_policy_data, policy, sizeof(*policy));

        err = ldlm_lock_enqueue(ns, &lock, policy, flags);
        if (err != ELDLM_OK)
                GOTO(out, err);

        if (policy != NULL)
                memcpy(policy, &lock->l_policy_data, sizeof(*policy));
        if ((*flags) & LDLM_FL_LOCK_CHANGED)
                memcpy(&res_id, &lock->l_resource->lr_name, sizeof(res_id));

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}
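
/* Clean up after a failed enqueue: mark the lock LDLM_FL_LOCAL_ONLY so that
 * no CANCEL RPC is sent for it (bug 407), then drop the reference and cancel
 * it locally. */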
static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock,
                                struct lustre_handle *lockh, int mode)
{
        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        l_lock(&ns->ns_lock);
        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
        l_unlock(&ns->ns_lock);

        ldlm_lock_decref_and_cancel(lockh, mode);
}
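
/* Main client-side enqueue path: create (or, on replay, look up) the lock,
 * pack it into an LDLM_ENQUEUE request, wait for the server's reply, and fix
 * up the local lock to match what was actually granted (mode, resource
 * name, LVB, pending blocking AST). */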
int ldlm_cli_enqueue(struct obd_export *exp,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct ldlm_res_id res_id,
                     __u32 type,
                     ldlm_policy_data_t *policy,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion,
                     ldlm_glimpse_callback glimpse,
                     void *data,
                     void *lvb,
                     __u32 lvb_len,
                     void *lvb_swabber,
                     struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size[2] = {sizeof(*body), lvb_len}, req_passed_in = 1;
        int is_replay = *flags & LDLM_FL_REPLAY;
        ENTRY;

        if (exp == NULL) {
                LASSERT(!is_replay);
                rc = ldlm_cli_enqueue_local(ns, res_id, type, policy, mode,
                                            flags, blocking, completion,
                                            glimpse, data, lvb_len, lvb_swabber,
                                            lockh);
                RETURN(rc);
        }

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                        completion, glimpse, data, lvb_len);
                if (lock == NULL)
                        GOTO(out_nolock, rc = -ENOMEM);
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, mode);
                ldlm_lock2handle(lock, lockh);
                lock->l_lvb_swabber = lvb_swabber;
                if (policy != NULL)
                        memcpy(&lock->l_policy_data, policy, sizeof(*policy));
                LDLM_DEBUG(lock, "client-side enqueue START");
        }

        if (req == NULL) {
                req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 1,
                                      size, NULL);
                if (req == NULL)
                        GOTO(out_lock, rc = -ENOMEM);
                req_passed_in = 0;
        } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
                LBUG();

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;

        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));

        /* Continue as normal. */
        if (!req_passed_in) {
                int buffers = 1;
                if (lvb_len > 0)
                        buffers = 2;
                size[0] = sizeof(*reply);
                req->rq_replen = lustre_msg_size(buffers, size);
        }
        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = blocking;

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);

        if (rc != ELDLM_OK) {
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                if (rc == ELDLM_LOCK_ABORTED) {
                        /* Before we return, swab the reply */
                        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                                   lustre_swab_ldlm_reply);
                        if (reply == NULL) {
                                CERROR("Can't unpack ldlm_reply\n");
                                rc = -EPROTO;
                        }
                        if (lvb_len) {
                                void *tmplvb;
                                tmplvb = lustre_swab_repbuf(req, 1, lvb_len,
                                                            lvb_swabber);
                                if (tmplvb == NULL)
                                        GOTO(out_lock, rc = -EPROTO);
                                if (lvb != NULL)
                                        memcpy(lvb, tmplvb, lvb_len);
                        }
                }
                GOTO(out_lock, rc);
        }

        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(out_lock, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        *flags = reply->lock_flags;

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
               lock, reply->lock_handle.cookie, *flags);
        if (type == LDLM_EXTENT) {
                CDEBUG(D_INFO, "requested extent: "LPU64" -> "LPU64", got "
                       "extent "LPU64" -> "LPU64"\n",
                       body->lock_desc.l_policy_data.l_extent.start,
                       body->lock_desc.l_policy_data.l_extent.end,
                       reply->lock_desc.l_policy_data.l_extent.start,
                       reply->lock_desc.l_policy_data.l_extent.end);
        }

        memcpy(&lock->l_policy_data, &reply->lock_desc.l_policy_data,
               sizeof(reply->lock_desc.l_policy_data));

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (reply->lock_desc.l_resource.lr_name.name[0] !=
                    lock->l_resource->lr_name.name[0]) {
                        CDEBUG(D_INFO, "remote intent success, locking %ld "
                               "instead of %ld\n",
                               (long)reply->lock_desc.l_resource.lr_name.name[0],
                               (long)lock->l_resource->lr_name.name[0]);

                        ldlm_lock_change_resource(ns, lock,
                                                  reply->lock_desc.l_resource.lr_name);
                        if (lock->l_resource == NULL) {
                                GOTO(out_lock, rc = -ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
        }

        if ((*flags) & LDLM_FL_AST_SENT) {
                l_lock(&ns->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                l_unlock(&ns->ns_lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        if (lvb_len) {
                void *tmplvb;
                tmplvb = lustre_swab_repbuf(req, 1, lvb_len, lvb_swabber);
                if (tmplvb == NULL)
                        GOTO(out_lock, rc = -EPROTO);
                memcpy(lock->l_lvb_data, tmplvb, lvb_len);
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);
                        if (!rc)
                                rc = err;
                }
        }

        if (lvb_len && lvb != NULL) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
 out_lock:
        if (rc)
                failed_lock_cleanup(ns, lock, lockh, mode);
        if (!req_passed_in && req != NULL)
                ptlrpc_req_finished(req);
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return rc;
}
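
/* Convert a lock that has no connection export (a purely local lock); the
 * conversion and resource reprocessing happen entirely on this node. */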
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        if (lock->l_resource->lr_namespace->ns_client) {
                CERROR("Trying to cancel local lock\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        ldlm_lock_convert(lock, new_mode, flags);
        ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int rc, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;

        if (lock->l_conn_export == NULL)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_exp2cliimp(lock->l_conn_export),
                              LDLM_CONVERT, 1, &size, NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR ("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL) {
                ldlm_reprocess_all(res);
                /* Go to sleep until the lock is granted. */
                /* FIXME: or cancelled. */
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
                                               NULL);
        }
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}
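
/* Cancel a lock by handle.  If the lock is still connected to a server and
 * is not marked LDLM_FL_LOCAL_ONLY, an LDLM_CANCEL RPC is sent first (on
 * the dedicated cancel portals); in every case the lock is then cancelled
 * locally. */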
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ptlrpc_request *req;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL)
                RETURN(0);

        if (lock->l_conn_export) {
                int local_only;
                struct obd_import *imp;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references*/
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags & LDLM_FL_LOCAL_ONLY);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                ldlm_cancel_callback(lock);

                if (local_only) {
                        CDEBUG(D_INFO, "not sending request (at caller's "
                               "instruction)\n");
                        goto local_cancel;
                }

        restart:
                imp = class_exp2cliimp(lock->l_conn_export);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_HA, "skipping cancel on invalid import %p\n",
                               imp);
                        goto local_cancel;
                }

                req = ptlrpc_prep_req(imp, LDLM_CANCEL, 1, &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);
                req->rq_no_resend = 1;

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
                memcpy(&body->lock_handle1, &lock->l_remote_handle,
                       sizeof(body->lock_handle1));

                req->rq_replen = lustre_msg_size(0, NULL);

                rc = ptlrpc_queue_wait(req);

                if (rc == ESTALE) {
                        CERROR("client/server (nid "LPU64") out of sync--not "
                               "fatal\n",
                               req->rq_import->imp_connection->c_peer.peer_nid);
                } else if (rc == -ETIMEDOUT) {
                        ptlrpc_req_finished(req);
                        GOTO(restart, rc);
                } else if (rc != ELDLM_OK) {
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);
                }

                ptlrpc_req_finished(req);
        local_cancel:
                ldlm_lock_cancel(lock);
        } else {
                if (lock->l_resource->lr_namespace->ns_client) {
                        LDLM_ERROR(lock, "Trying to cancel local lock\n");
                        LBUG();
                }
                LDLM_DEBUG(lock, "client-side local cancel");
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "client-side local cancel handler END");
        }

        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        return rc;
}
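
/* Walk the namespace's unused-lock LRU and cancel enough locks to bring
 * ns_nr_unused back below ns_max_unused.  Candidates are collected under
 * the namespace lock; the potentially blocking cancel RPCs are issued only
 * after it has been dropped. */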
int ldlm_cancel_lru(struct ldlm_namespace *ns)
{
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        int count, rc = 0;
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&ns->ns_lock);
        count = ns->ns_nr_unused - ns->ns_max_unused;
        if (count <= 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        list_for_each_safe(tmp, next, &ns->ns_unused_list) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_lru);

                LASSERT(!lock->l_readers && !lock->l_writers);

                /* Setting the CBPENDING flag is a little misleading, but
                 * prevents an important race; namely, once CBPENDING is set,
                 * the lock can accumulate no more readers/writers. Since
                 * readers and writers are already zero here, ldlm_lock_decref
                 * won't see this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);
                list_add(&w->w_list, &list);
                ldlm_lock_remove_from_lru(lock);

                if (--count == 0)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                ldlm_lock2handle(w->w_lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc != ELDLM_OK)
                        CDEBUG(D_INFO, "ldlm_cli_cancel: %d\n", rc);

                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        RETURN(0);
}
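
/* Cancel every unused (no readers or writers) lock granted on a single
 * resource, honouring the LDLM_FL_LOCAL_ONLY / LDLM_FL_NO_CALLBACK /
 * LDLM_FL_WARN flags documented at ldlm_cli_cancel_unused() below. */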
static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                           struct ldlm_res_id res_id, int flags,
                                           void *opaque)
{
        struct ldlm_resource *res;
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        struct ldlm_ast_work *w;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id.name[0]);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (opaque != NULL && lock->l_ast_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                   lock->l_ast_data, opaque);
                        continue;
                }

                if (lock->l_readers || lock->l_writers) {
                        if (flags & LDLM_FL_WARN) {
                                LDLM_ERROR(lock, "lock in use");
                        }
                        continue;
                }

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);

                /* Prevent the cancel callback from being called by setting
                 * LDLM_FL_CANCEL in the lock. Very sneaky. -p */
                if (flags & LDLM_FL_NO_CALLBACK)
                        w->w_lock->l_flags |= LDLM_FL_CANCEL;

                list_add(&w->w_list, &list);
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        ldlm_lock_cancel(w->w_lock);
                } else {
                        ldlm_lock2handle(w->w_lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc != ELDLM_OK)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                }
                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        ldlm_resource_putref(res);

        RETURN(0);
}

/* Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server.
 * If flags & LDLM_FL_NO_CALLBACK, don't run the cancel callback.
 * If flags & LDLM_FL_WARN, print a warning if some locks are still in use. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id, int flags, void *opaque)
{
        int i;
        ENTRY;

        if (ns == NULL)
                RETURN(ELDLM_OK);

        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, *res_id, flags,
                                                       opaque));

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        int rc;
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags, opaque);
                        if (rc)
                                CERROR("cancel_unused_res ("LPU64"): %d\n",
                                       res->lr_name.name[0], rc);
                        ldlm_resource_putref(res);
                }
        }
        l_unlock(&ns->ns_lock);

        RETURN(ELDLM_OK);
}

/* Lock iterators. */

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int rc = LDLM_ITER_CONTINUE;
        struct ldlm_namespace *ns = res->lr_namespace;
        ENTRY;

        if (!res)
                RETURN(LDLM_ITER_CONTINUE);

        l_lock(&ns->ns_lock);
        list_for_each_safe(tmp, next, &res->lr_granted) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_converting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_waiting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}

struct iter_helper_data {
        ldlm_iterator_t iter;
        void *closure;
};

static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
        struct iter_helper_data *helper = closure;
        return helper->iter(lock, helper->closure);
}

static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
{
        return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
}

int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure)
{
        struct iter_helper_data helper = { iter: iter, closure: closure };
        return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
}
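
/* Illustrative only (not part of the original file): a callback handed to
 * ldlm_namespace_foreach() sees every lock in the namespace and returns
 * LDLM_ITER_CONTINUE or LDLM_ITER_STOP, e.g.
 *
 *      static int count_lock_cb(struct ldlm_lock *lock, void *closure)
 *      {
 *              (*(int *)closure)++;
 *              return LDLM_ITER_CONTINUE;
 *      }
 *
 *      int count = 0;
 *      ldlm_namespace_foreach(ns, count_lock_cb, &count);
 */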
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure)
{
        int i, rc = LDLM_ITER_CONTINUE;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        rc = iter(res, closure);
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}

/* non-blocking function to manipulate a lock whose cb_data is being put away.*/
void ldlm_change_cbdata(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
                        ldlm_iterator_t iter, void *data)
{
        struct ldlm_resource *res;
        ENTRY;

        if (ns == NULL) {
                CERROR("must pass in namespace");
                LBUG();
        }

        res = ldlm_resource_get(ns, NULL, *res_id, 0, 0);
        if (res == NULL)
                return;

        l_lock(&ns->ns_lock);
        ldlm_resource_foreach(res, iter, data);
        l_unlock(&ns->ns_lock);
        ldlm_resource_putref(res);
        EXIT;
}
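
/* Lock replay: after a server failure the client re-sends an LDLM_ENQUEUE
 * for every lock it holds so the recovering server can rebuild its lock
 * state.  The helpers below chain the locks onto a private list (reusing
 * l_pending_chain) and replay them one asynchronous RPC at a time through
 * ptlrpcd. */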
static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
        struct list_head *list = closure;

        /* we use l_pending_chain here, because it's unused on clients. */
        list_add(&lock->l_pending_chain, list);
        return LDLM_ITER_CONTINUE;
}

static int replay_lock_interpret(struct ptlrpc_request *req,
                                 void *data, int rc)
{
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;
        ENTRY;

        atomic_dec(&req->rq_import->imp_replay_inflight);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        lock = req->rq_async_args.pointer_arg[0];
        LASSERT(lock != NULL);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "replayed lock:");
        ptlrpc_import_recovery_state_machine(req->rq_import);
 out:
        RETURN(rc);
}
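
/* Re-send the enqueue for a single lock during replay.  The lock's current
 * disposition (granted, converting, waiting, or no reply yet) is encoded in
 * the LDLM_FL_BLOCK_* flag sent along with LDLM_FL_REPLAY, as the comment
 * inside explains.  The request is queued through ptlrpcd and completed by
 * replay_lock_interpret() above. */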
static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int size, flags;
        ENTRY;

        /*
         * If granted mode matches the requested mode, this lock is granted.
         *
         * If they differ, but we have a granted mode, then we were granted
         * one mode and now want another: ergo, converting.
         *
         * If we haven't been granted anything and are on a resource list,
         * then we're blocked/waiting.
         *
         * If we haven't been granted anything and we're NOT on a resource list,
         * then we haven't got a reply yet and don't have a known disposition.
         * This happens whenever a lock enqueue is the request that triggers
         * recovery.
         */
        if (lock->l_granted_mode == lock->l_req_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
        else if (lock->l_granted_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
        else if (!list_empty(&lock->l_res_link))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
        else
                flags = LDLM_FL_REPLAY;

        size = sizeof(*body);
        req = ptlrpc_prep_req(imp, LDLM_ENQUEUE, 1, &size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        /* We're part of recovery, so don't wait for it. */
        req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = flags;

        ldlm_lock2handle(lock, &body->lock_handle1);
        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        LDLM_DEBUG(lock, "replaying lock:");

        atomic_inc(&req->rq_import->imp_replay_inflight);
        req->rq_async_args.pointer_arg[0] = lock;
        req->rq_interpret_reply = replay_lock_interpret;
        ptlrpcd_add_req(req);

        RETURN(0);
}
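
/* Called from import recovery: chain every lock in the import's namespace
 * for replay and re-enqueue them.  imp_replay_inflight is raised before any
 * lock is queued (see the comment below) so it cannot drop to zero until
 * every lock has been handed to ptlrpcd. */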
int ldlm_replay_locks(struct obd_import *imp)
{
        struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
        struct list_head list, *pos, *next;
        struct ldlm_lock *lock;
        int rc = 0;
        ENTRY;

        INIT_LIST_HEAD(&list);

        LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);

        /* ensure this doesn't fall to 0 before all have been queued */
        atomic_inc(&imp->imp_replay_inflight);

        l_lock(&ns->ns_lock);
        (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);

        list_for_each_safe(pos, next, &list) {
                lock = list_entry(pos, struct ldlm_lock, l_pending_chain);
                rc = replay_one_lock(imp, lock);
                if (rc)
                        break; /* or try to do the rest? */
        }
        l_unlock(&ns->ns_lock);

        atomic_dec(&imp->imp_replay_inflight);

        RETURN(rc);
}