/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <linux/obd.h>

#include "ldlm_internal.h"

static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32             lwd_generation;
};

int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;
        ENTRY;

        if (lock->l_conn_export == NULL) {
                static unsigned long next_dump = 0;

                LDLM_ERROR(lock, "lock timed out; not entering recovery in "
                           "server code, just going back to sleep");
                if (time_after(jiffies, next_dump)) {
                        ldlm_namespace_dump(lock->l_resource->lr_namespace);
                        portals_debug_dumplog();
                        next_dump = jiffies + 300 * HZ;
                }
                RETURN(0);
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_generation);
        LDLM_ERROR(lock, "lock timed out, entering recovery for %s@%s",
                   imp->imp_target_uuid.uuid,
                   imp->imp_connection->c_remote_uuid.uuid);

        RETURN(0);
}

int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        unsigned long irqflags;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        int rc = 0;
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC)
                goto noreproc;

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock, 0);
        ldlm_reprocess_all(lock->l_resource);

 noreproc:
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;

        if (flags & LDLM_FL_NO_TIMEOUT) {
                LDLM_DEBUG(lock, "waiting indefinitely for group lock");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ,
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }

        if (imp != NULL) {
                spin_lock_irqsave(&imp->imp_lock, irqflags);
                lwd.lwd_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, irqflags);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           (lock->l_flags & LDLM_FL_FAILED)), &lwi);

        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}

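/*
 * Usage sketch (illustrative only, not compiled): callers typically pass
 * ldlm_completion_ast as the completion callback at enqueue time.  When the
 * server cannot grant the lock immediately, the enqueue path invokes it with
 * one of the LDLM_FL_BLOCK_* flags set and the calling thread sleeps on
 * l_waitq until the lock is granted, fails, or times out.  "my_blocking_ast"
 * and the surrounding variables are hypothetical.
 */
#if 0
        rc = ldlm_cli_enqueue(exp, NULL, ns, res_id, LDLM_PLAIN, NULL, LCK_PR,
                              &flags, my_blocking_ast, ldlm_completion_ast,
                              NULL, NULL, NULL, 0, NULL, &lockh);
#endif
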
static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                                  struct ldlm_res_id res_id,
                                  __u32 type,
                                  ldlm_policy_data_t *policy,
                                  ldlm_mode_t mode,
                                  int *flags,
                                  ldlm_blocking_callback blocking,
                                  ldlm_completion_callback completion,
                                  ldlm_glimpse_callback glimpse,
                                  void *data, __u32 lvb_len,
                                  void *lvb_swabber,
                                  struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        if (ns->ns_client) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                completion, glimpse, data, lvb_len);
        if (!lock)
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock->l_flags |= LDLM_FL_LOCAL;
        lock->l_lvb_swabber = lvb_swabber;
        if (policy != NULL)
                memcpy(&lock->l_policy_data, policy, sizeof(*policy));
        if (type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &policy->l_extent,
                       sizeof(policy->l_extent));

        err = ldlm_lock_enqueue(ns, &lock, policy, flags);
        if (err != ELDLM_OK)
                GOTO(out, err);

        if (policy != NULL)
                memcpy(policy, &lock->l_policy_data, sizeof(*policy));
        if ((*flags) & LDLM_FL_LOCK_CHANGED)
                memcpy(&res_id, &lock->l_resource->lr_name, sizeof(res_id));

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}

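/*
 * Sketch of a server-local caller (illustrative only, assuming a server-side
 * namespace with ns->ns_client == 0): no RPC is sent; the lock is created and
 * enqueued directly against the local namespace.  "server_ns" and
 * "my_blocking_ast" are hypothetical.
 */
#if 0
        int flags = 0;
        rc = ldlm_cli_enqueue_local(server_ns, res_id, LDLM_PLAIN, NULL,
                                    LCK_EX, &flags, my_blocking_ast,
                                    ldlm_completion_ast, NULL, NULL, 0, NULL,
                                    &lockh);
#endif
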
static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock,
                                struct lustre_handle *lockh, int mode)
{
        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        l_lock(&ns->ns_lock);
        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
        l_unlock(&ns->ns_lock);

        ldlm_lock_decref_and_cancel(lockh, mode);
}

int ldlm_cli_enqueue(struct obd_export *exp,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct ldlm_res_id res_id,
                     __u32 type,
                     ldlm_policy_data_t *policy,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion,
                     ldlm_glimpse_callback glimpse,
                     void *data,
                     void *lvb,
                     __u32 lvb_len,
                     void *lvb_swabber,
                     struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size[2] = {sizeof(*body), lvb_len}, req_passed_in = 1;
        int is_replay = *flags & LDLM_FL_REPLAY;
        ENTRY;

        if (exp == NULL) {
                rc = ldlm_cli_enqueue_local(ns, res_id, type, policy, mode,
                                            flags, blocking, completion,
                                            glimpse, data, lvb_len, lvb_swabber,
                                            lockh);
                RETURN(rc);
        }

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                lock = ldlm_lock_create(ns, NULL, res_id, type, mode, blocking,
                                        completion, glimpse, data, lvb_len);
                if (lock == NULL)
                        GOTO(out_nolock, rc = -ENOMEM);
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, mode);
                ldlm_lock2handle(lock, lockh);
                lock->l_lvb_swabber = lvb_swabber;
                if (policy != NULL)
                        memcpy(&lock->l_policy_data, policy, sizeof(*policy));
                if (type == LDLM_EXTENT)
                        memcpy(&lock->l_req_extent, &policy->l_extent,
                               sizeof(policy->l_extent));
                LDLM_DEBUG(lock, "client-side enqueue START");
        }

        if (req == NULL) {
                req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 1,
                                      size, NULL);
                if (req == NULL)
                        GOTO(out_lock, rc = -ENOMEM);
                req_passed_in = 0;
        } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
                LBUG();

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;

        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));

        /* Continue as normal. */
        if (!req_passed_in) {
                int buffers = 1;
                if (lvb_len > 0)
                        buffers = 2;
                size[0] = sizeof(*reply);
                req->rq_replen = lustre_msg_size(buffers, size);
        }
        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = blocking;

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                if (rc == ELDLM_LOCK_ABORTED) {
                        /* Before we return, swab the reply */
                        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                                   lustre_swab_ldlm_reply);
                        if (reply == NULL) {
                                CERROR("Can't unpack ldlm_reply\n");
                                rc = -EPROTO;
                        }
                        if (lvb_len) {
                                void *tmplvb;
                                tmplvb = lustre_swab_repbuf(req, 1, lvb_len,
                                                            lvb_swabber);
                                if (tmplvb == NULL)
                                        GOTO(out_lock, rc = -EPROTO);
                                if (lvb != NULL)
                                        memcpy(lvb, tmplvb, lvb_len);
                        }
                }
                GOTO(out_lock, rc);
        }

        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(out_lock, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        *flags = reply->lock_flags;

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
               lock, reply->lock_handle.cookie, *flags);

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;

                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (reply->lock_desc.l_resource.lr_name.name[0] !=
                    lock->l_resource->lr_name.name[0]) {
                        CDEBUG(D_INFO, "remote intent success, locking %ld "
                               "instead of %ld\n",
                               (long)reply->lock_desc.l_resource.lr_name.name[0],
                               (long)lock->l_resource->lr_name.name[0]);

                        ldlm_lock_change_resource(ns, lock,
                                            reply->lock_desc.l_resource.lr_name);
                        if (lock->l_resource == NULL) {
                                LBUG();
                                GOTO(out_lock, rc = -ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }

                memcpy(&lock->l_policy_data,
                       &reply->lock_desc.l_policy_data,
                       sizeof(reply->lock_desc.l_policy_data));
                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock,"client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT) {
                l_lock(&ns->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                l_unlock(&ns->ns_lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /* If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one. */
        if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
                void *tmplvb;
                tmplvb = lustre_swab_repbuf(req, 1, lvb_len, lvb_swabber);
                if (tmplvb == NULL)
                        GOTO(out_lock, rc = -EPROTO);
                memcpy(lock->l_lvb_data, tmplvb, lvb_len);
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);
                        if (!rc)
                                rc = err;
                }
        }

        if (lvb_len && lvb != NULL) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
 out_lock:
        if (rc)
                failed_lock_cleanup(ns, lock, lockh, mode);
        if (!req_passed_in && req != NULL)
                ptlrpc_req_finished(req);
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return rc;
}

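/*
 * Usage sketch (illustrative only): a client enqueue of an extent lock with
 * an LVB.  Identifiers other than the ldlm_* and lustre_* API are
 * hypothetical.  On success the caller holds one reference of the requested
 * mode on the lock named by lockh and must eventually drop it with
 * ldlm_lock_decref().
 */
#if 0
        ldlm_policy_data_t policy = { { 0, OBD_OBJECT_EOF } };
        struct ost_lvb lvb;
        int flags = 0;

        rc = ldlm_cli_enqueue(exp, NULL, ns, res_id, LDLM_EXTENT, &policy,
                              LCK_PW, &flags, my_blocking_ast,
                              ldlm_completion_ast, NULL, NULL, &lvb,
                              sizeof(lvb), lustre_swab_ost_lvb, &lockh);
        if (rc != ELDLM_OK)
                CERROR("enqueue failed: %d\n", rc);
        else
                ldlm_lock_decref(&lockh, LCK_PW);
#endif
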
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        ENTRY;
        if (lock->l_resource->lr_namespace->ns_client) {
                CERROR("Trying to convert local lock in a shadow namespace\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        ldlm_lock_convert(lock, new_mode, flags);
        ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int rc, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;

        if (lock->l_conn_export == NULL)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_exp2cliimp(lock->l_conn_export),
                              LDLM_CONVERT, 1, &size, NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(out, rc = -EPROTO);
        }

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL)
                ldlm_reprocess_all(res);
        /* Go to sleep until the lock is granted. */
        /* FIXME: or cancelled. */
        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}

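/*
 * Sketch (illustrative only): downconverting a cached PW lock to PR rather
 * than cancelling it outright; lockh is assumed to name a granted lock.
 */
#if 0
        int flags = 0;
        rc = ldlm_cli_convert(&lockh, LCK_PR, &flags);
#endif
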
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ptlrpc_request *req;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL)
                RETURN(0);

        if (lock->l_conn_export) {
                int local_only;
                struct obd_import *imp;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references*/
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags & LDLM_FL_LOCAL_ONLY);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                ldlm_cancel_callback(lock);

                if (local_only) {
                        CDEBUG(D_INFO, "not sending request (at caller's "
                               "instruction)\n");
                        GOTO(out, rc = 0);
                }

        restart:
                imp = class_exp2cliimp(lock->l_conn_export);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_HA, "skipping cancel on invalid import %p\n",
                               imp);
                        GOTO(out, rc = 0);
                }

                req = ptlrpc_prep_req(imp, LDLM_CANCEL, 1, &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);
                req->rq_no_resend = 1;

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
                memcpy(&body->lock_handle1, &lock->l_remote_handle,
                       sizeof(body->lock_handle1));

                req->rq_replen = lustre_msg_size(0, NULL);

                rc = ptlrpc_queue_wait(req);

                if (rc == ESTALE) {
                        char str[PTL_NALFMT_SIZE];
                        CERROR("client/server (nid %s) out of sync"
                               " -- not fatal\n",
                               ptlrpc_peernid2str(&req->rq_import->
                                                  imp_connection->c_peer, str));
                } else if (rc == -ETIMEDOUT) {
                        ptlrpc_req_finished(req);
                        GOTO(restart, rc);
                } else if (rc != ELDLM_OK) {
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);
                }

                ptlrpc_req_finished(req);
        out:
                ldlm_lock_cancel(lock);
        } else {
                if (lock->l_resource->lr_namespace->ns_client) {
                        LDLM_ERROR(lock, "Trying to cancel local lock");
                        LBUG();
                }
                LDLM_DEBUG(lock, "client-side local cancel");
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "client-side local cancel handler END");
        }

        LDLM_LOCK_PUT(lock);
        RETURN(rc);
}

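/*
 * Sketch (illustrative only): the common client pattern is to drop the last
 * reference and cancel in one step via ldlm_lock_decref_and_cancel(), which
 * ends up here, but a handle can also be cancelled directly.
 */
#if 0
        rc = ldlm_cli_cancel(&lockh);
        if (rc)
                CERROR("lock cancel failed: %d\n", rc);
#endif
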
/* When called with LDLM_ASYNC the blocking callback will be handled
 * in a thread and this function will return after the thread has been
 * asked to call the callback.  When called with LDLM_SYNC the blocking
 * callback will be performed in this function. */
int ldlm_cancel_lru(struct ldlm_namespace *ns, ldlm_sync_t sync)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int count, rc = 0;
        struct list_head cblist = LIST_HEAD_INIT(cblist);
        ENTRY;

        l_lock(&ns->ns_lock);
        count = ns->ns_nr_unused - ns->ns_max_unused;
        if (count <= 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        list_for_each_safe(tmp, next, &ns->ns_unused_list) {
                lock = list_entry(tmp, struct ldlm_lock, l_lru);

                LASSERT(!lock->l_readers && !lock->l_writers);

                /* Setting the CBPENDING flag is a little misleading, but
                 * prevents an important race; namely, once CBPENDING is set,
                 * the lock can accumulate no more readers/writers.  Since
                 * readers and writers are already zero here, ldlm_lock_decref
                 * won't see this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING;

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                if (sync == LDLM_ASYNC)
                        ldlm_bl_to_thread(ns, NULL, lock);
                else
                        list_add(&lock->l_lru, &cblist);

                if (--count == 0)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &cblist) {
                lock = list_entry(tmp, struct ldlm_lock, l_lru);
                list_del_init(&lock->l_lru);
                ldlm_handle_bl_callback(ns, NULL, lock);
        }
        RETURN(rc);
}

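/*
 * Sketch (illustrative only): shrinking the per-namespace LRU after lowering
 * ns_max_unused.  LDLM_ASYNC hands the blocking callbacks to the bl thread;
 * LDLM_SYNC runs them in the caller's context.  The new limit is
 * hypothetical.
 */
#if 0
        ns->ns_max_unused = 20;
        rc = ldlm_cancel_lru(ns, LDLM_ASYNC);
#endif
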
static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                           struct ldlm_res_id res_id, int flags,
                                           void *opaque)
{
        struct ldlm_resource *res;
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        struct ldlm_ast_work *w;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id.name[0]);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (opaque != NULL && lock->l_ast_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                   lock->l_ast_data, opaque);
                        continue;
                }

                if (lock->l_readers || lock->l_writers) {
                        if (flags & LDLM_FL_WARN) {
                                LDLM_ERROR(lock, "lock in use");
                                LBUG();
                        }
                        continue;
                }

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);

                list_add(&w->w_list, &list);
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        ldlm_lock_cancel(w->w_lock);
                } else {
                        ldlm_lock2handle(w->w_lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc != ELDLM_OK)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                }
                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        ldlm_resource_putref(res);

        RETURN(0);
}

/* Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server.
 * If flags & LDLM_FL_WARN, print a warning if some locks are still in use. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id, int flags, void *opaque)
{
        int i;
        ENTRY;

        if (ns == NULL)
                RETURN(ELDLM_OK);

        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, *res_id, flags,
                                                       opaque));

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        int rc;
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags, opaque);
                        if (rc)
                                CERROR("cancel_unused_res ("LPU64"): %d\n",
                                       res->lr_name.name[0], rc);
                        ldlm_resource_putref(res);
                }
        }
        l_unlock(&ns->ns_lock);

        RETURN(ELDLM_OK);
}

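/*
 * Sketch (illustrative only): flushing every idle lock on one resource
 * without notifying the server, as an import teardown path might.
 */
#if 0
        rc = ldlm_cli_cancel_unused(ns, &res_id, LDLM_FL_LOCAL_ONLY, NULL);
#endif
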
/* Lock iterators. */

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int rc = LDLM_ITER_CONTINUE;
        struct ldlm_namespace *ns;
        ENTRY;

        if (!res)
                RETURN(LDLM_ITER_CONTINUE);

        ns = res->lr_namespace;
        l_lock(&ns->ns_lock);
        list_for_each_safe(tmp, next, &res->lr_granted) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_converting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_waiting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}

struct iter_helper_data {
        ldlm_iterator_t iter;
        void *closure;
};

static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
        struct iter_helper_data *helper = closure;
        return helper->iter(lock, helper->closure);
}

static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
{
        return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
}

int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure)
{
        struct iter_helper_data helper = { iter: iter, closure: closure };
        return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
}

int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure)
{
        int i, rc = LDLM_ITER_CONTINUE;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        rc = iter(res, closure);
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        l_unlock(&ns->ns_lock);
        return rc;
}

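/*
 * Sketch (illustrative only): counting granted locks in a namespace with the
 * iterator API; count_granted() is hypothetical.
 */
#if 0
static int count_granted(struct ldlm_lock *lock, void *closure)
{
        if (lock->l_req_mode == lock->l_granted_mode)
                (*(int *)closure)++;
        return LDLM_ITER_CONTINUE;
}

        /* in the caller: */
        int granted = 0;
        ldlm_namespace_foreach(ns, count_granted, &granted);
#endif
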
/* Non-blocking function to manipulate a lock whose cb_data is being put away.*/
void ldlm_change_cbdata(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
                        ldlm_iterator_t iter, void *data)
{
        struct ldlm_resource *res;
        ENTRY;

        if (ns == NULL) {
                CERROR("must pass in namespace\n");
                LBUG();
        }

        res = ldlm_resource_get(ns, NULL, *res_id, 0, 0);
        if (res == NULL) {
                EXIT;
                return;
        }

        l_lock(&ns->ns_lock);
        ldlm_resource_foreach(res, iter, data);
        l_unlock(&ns->ns_lock);
        ldlm_resource_putref(res);
        EXIT;
}

static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
        struct list_head *list = closure;

        /* we use l_pending_chain here, because it's unused on clients. */
        list_add(&lock->l_pending_chain, list);
        return LDLM_ITER_CONTINUE;
}

static int replay_lock_interpret(struct ptlrpc_request *req,
                                 void *data, int rc)
{
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;

        atomic_dec(&req->rq_import->imp_replay_inflight);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        lock = req->rq_async_args.pointer_arg[0];
        LASSERT(lock != NULL);

        reply = lustre_swab_repbuf(req, 0, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(out, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "replayed lock:");
        ptlrpc_import_recovery_state_machine(req->rq_import);
 out:
        RETURN(rc);
}

static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int buffers = 1;
        int size[2];
        int flags;
        ENTRY;

        /*
         * If granted mode matches the requested mode, this lock is granted.
         *
         * If they differ, but we have a granted mode, then we were granted
         * one mode and now want another: ergo, converting.
         *
         * If we haven't been granted anything and are on a resource list,
         * then we're blocked/waiting.
         *
         * If we haven't been granted anything and we're NOT on a resource list,
         * then we haven't got a reply yet and don't have a known disposition.
         * This happens whenever a lock enqueue is the request that triggers
         * recovery.
         */
        if (lock->l_granted_mode == lock->l_req_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
        else if (lock->l_granted_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
        else if (!list_empty(&lock->l_res_link))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
        else
                flags = LDLM_FL_REPLAY;

        size[0] = sizeof(*body);
        req = ptlrpc_prep_req(imp, LDLM_ENQUEUE, 1, size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        /* We're part of recovery, so don't wait for it. */
        req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = flags;

        ldlm_lock2handle(lock, &body->lock_handle1);
        size[0] = sizeof(*reply);
        if (lock->l_lvb_len != 0) {
                buffers = 2;
                size[1] = lock->l_lvb_len;
        }
        req->rq_replen = lustre_msg_size(buffers, size);

        LDLM_DEBUG(lock, "replaying lock:");

        atomic_inc(&req->rq_import->imp_replay_inflight);
        req->rq_async_args.pointer_arg[0] = lock;
        req->rq_interpret_reply = replay_lock_interpret;
        ptlrpcd_add_req(req);

        RETURN(0);
}

int ldlm_replay_locks(struct obd_import *imp)
{
        struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
        struct list_head list, *pos, *next;
        struct ldlm_lock *lock;
        int rc = 0;
        ENTRY;

        INIT_LIST_HEAD(&list);

        LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);

        /* ensure this doesn't fall to 0 before all have been queued */
        atomic_inc(&imp->imp_replay_inflight);

        l_lock(&ns->ns_lock);
        (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);

        list_for_each_safe(pos, next, &list) {
                lock = list_entry(pos, struct ldlm_lock, l_pending_chain);
                rc = replay_one_lock(imp, lock);
                if (rc)
                        break; /* or try to do the rest? */
        }
        l_unlock(&ns->ns_lock);

        atomic_dec(&imp->imp_replay_inflight);

        RETURN(rc);
}

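/*
 * Sketch (illustrative only): lock replay is normally driven by the import
 * recovery state machine once the connection has been reestablished; the
 * trigger amounts to no more than this, assuming the import is in the
 * LUSTRE_IMP_REPLAY_LOCKS state.
 */
#if 0
        if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS)
                rc = ldlm_replay_locks(imp);
#endif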