/* lustre/ldlm/ldlm_request.c */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
#include <signal.h>
#include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <linux/obd.h>

static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        int               lwd_generation;
};

int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_device *obd = class_conn2obd(lock->l_connh);

        if (obd == NULL) {
                LDLM_ERROR(lock, "lock timed out; not entering recovery in "
                           "server code, just going back to sleep");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                ptlrpc_fail_import(imp, lwd->lwd_generation);
                LDLM_ERROR(lock, "lock timed out, entering recovery for %s@%s",
                           imp->imp_target_uuid.uuid,
                           imp->imp_connection->c_remote_uuid.uuid);
        }

        RETURN(0);
}

int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        unsigned long irqflags;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        int rc = 0;
        struct l_wait_info lwi;

        obd = class_conn2obd(lock->l_connh);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;

        lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, ldlm_expired_completion_wait,
                               interrupted_completion_wait, &lwd);
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC)
                goto noreproc;

        if (flags == 0) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV)))
                RETURN(0);

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock);
        ldlm_reprocess_all(lock->l_resource);

 noreproc:
        if (imp != NULL) {
                spin_lock_irqsave(&imp->imp_lock, irqflags);
                lwd.lwd_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, irqflags);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           lock->l_destroyed), &lwi);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}
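
/*
 * Illustrative sketch of how ldlm_completion_ast() is meant to be used: it is
 * passed as the completion callback to ldlm_cli_enqueue() so that, when the
 * server returns one of the LDLM_FL_BLOCK_* flags, the caller sleeps on
 * l_waitq until the lock is granted (or destroyed).  The helper name
 * example_enqueue_and_wait and its argument list are hypothetical, not part
 * of any Lustre API.
 */
#if 0
static int example_enqueue_and_wait(struct lustre_handle *connh,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_res_id res_id, __u32 type,
                                    void *cookie, int cookielen,
                                    ldlm_mode_t mode,
                                    ldlm_blocking_callback my_blocking_ast,
                                    struct lustre_handle *lockh)
{
        int flags = 0;

        /* With ldlm_completion_ast as the completion callback, this call
         * does not return until the enqueue has been granted, aborted or
         * the lock destroyed. */
        return ldlm_cli_enqueue(connh, NULL, ns, NULL, res_id, type,
                                cookie, cookielen, mode, &flags,
                                ldlm_completion_ast, my_blocking_ast,
                                NULL, lockh);
}
#endif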

static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                                  struct lustre_handle *parent_lockh,
                                  struct ldlm_res_id res_id,
                                  __u32 type,
                                  void *cookie, int cookielen,
                                  ldlm_mode_t mode,
                                  int *flags,
                                  ldlm_completion_callback completion,
                                  ldlm_blocking_callback blocking,
                                  void *data,
                                  struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        if (ns->ns_client) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, parent_lockh, res_id, type, mode,
                                blocking, data);
        if (!lock)
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock->l_flags |= LDLM_FL_LOCAL;

        err = ldlm_lock_enqueue(ns, &lock, cookie, cookielen, flags,
                                completion);
        if (err != ELDLM_OK)
                GOTO(out, err);

        if (type == LDLM_EXTENT)
                memcpy(cookie, &lock->l_extent, sizeof(lock->l_extent));
        if ((*flags) & LDLM_FL_LOCK_CHANGED)
                memcpy(&res_id, &lock->l_resource->lr_name, sizeof(res_id));

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}
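
/*
 * Note: ldlm_cli_enqueue_local() is not a public entry point; it is the path
 * taken by ldlm_cli_enqueue() below when connh is NULL, i.e. when the lock
 * lives entirely in a server-side (non-shadow) namespace and no RPC is
 * needed.
 */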

int ldlm_cli_enqueue(struct lustre_handle *connh,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct lustre_handle *parent_lock_handle,
                     struct ldlm_res_id res_id,
                     __u32 type,
                     void *cookie, int cookielen,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_completion_callback completion,
                     ldlm_blocking_callback blocking,
                     void *data,
                     struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size = sizeof(*body), req_passed_in = 1, is_replay;
        ENTRY;

        is_replay = *flags & LDLM_FL_REPLAY;
        LASSERT(connh != NULL || !is_replay);

        if (connh == NULL) {
                rc = ldlm_cli_enqueue_local(ns, parent_lock_handle, res_id,
                                            type, cookie, cookielen, mode,
                                            flags, completion, blocking, data,
                                            lockh);
                RETURN(rc);
        }

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(connh == lock->l_connh);
        } else {
                lock = ldlm_lock_create(ns, parent_lock_handle, res_id, type,
                                        mode, blocking, data);
                if (lock == NULL)
                        GOTO(out_nolock, rc = -ENOMEM);
                /* ugh.  I set this early (instead of waiting for _enqueue)
                 * because the completion AST might arrive early, and we need
                 * (in just this one case) to run the completion_cb even if it
                 * arrives before the reply. */
                lock->l_completion_ast = completion;
                LDLM_DEBUG(lock, "client-side enqueue START");
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, mode);
                ldlm_lock2handle(lock, lockh);
                if (type == LDLM_EXTENT)
                        memcpy(&lock->l_extent, cookie,
                               sizeof(body->lock_desc.l_extent));
        }

        if (req == NULL) {
                req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_ENQUEUE, 1,
                                      &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);
                req_passed_in = 0;
        } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
                LBUG();

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;

        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));
        if (parent_lock_handle)
                memcpy(&body->lock_handle2, parent_lock_handle,
                       sizeof(body->lock_handle2));

        /* Continue as normal. */
        if (!req_passed_in) {
                size = sizeof(*reply);
                req->rq_replen = lustre_msg_size(1, &size);
        }
        lock->l_connh = connh;
        lock->l_export = NULL;
        lock->l_blocking_ast = blocking;

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                /* Set a flag to prevent us from sending a CANCEL (bug 407) */
                l_lock(&ns->ns_lock);
                lock->l_flags |= LDLM_FL_LOCAL_ONLY;
                l_unlock(&ns->ns_lock);

                ldlm_lock_decref_and_cancel(lockh, mode);

                if (rc == ELDLM_LOCK_ABORTED) {
                        /* caller expects reply buffer 0 to have been swabbed */
                        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                                   lustre_swab_ldlm_reply);
                        if (reply == NULL) {
                                CERROR ("Can't unpack ldlm_reply\n");
                                GOTO (out_req, rc = -EPROTO);
                        }
                }
                GOTO(out_req, rc);
        }

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR ("Can't unpack ldlm_reply\n");
                GOTO (out_req, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        *flags = reply->lock_flags;

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: %d\n", lock,
               reply->lock_handle.cookie, *flags);
        if (type == LDLM_EXTENT) {
                CDEBUG(D_INFO, "requested extent: "LPU64" -> "LPU64", got "
                       "extent "LPU64" -> "LPU64"\n",
                       body->lock_desc.l_extent.start,
                       body->lock_desc.l_extent.end,
                       reply->lock_extent.start, reply->lock_extent.end);

                if ((reply->lock_extent.end & ~PAGE_MASK) != ~PAGE_MASK) {
                        /* XXX Old versions of BA OST code have a fencepost bug
                         * which will cause them to grant a lock that's one
                         * byte too large.  This can be safely removed after BA
                         * ships their next release -phik (02 Apr 2003) */
                        reply->lock_extent.end--;
                } else if ((reply->lock_extent.start & ~PAGE_MASK) ==
                           ~PAGE_MASK) {
                        reply->lock_extent.start++;
                }

                cookie = &reply->lock_extent; /* FIXME bug 267 */
                cookielen = sizeof(reply->lock_extent);
        }

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_mode;
                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (reply->lock_resource_name.name[0] !=
                    lock->l_resource->lr_name.name[0]) {
                        CDEBUG(D_INFO, "remote intent success, locking %ld "
                               "instead of %ld\n",
                               (long)reply->lock_resource_name.name[0],
                               (long)lock->l_resource->lr_name.name[0]);

                        ldlm_lock_change_resource(ns, lock,
                                                  reply->lock_resource_name);
                        if (lock->l_resource == NULL) {
                                LBUG();
                                GOTO(out_req, rc = -ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
        }

        if (!is_replay) {
                l_lock(&ns->ns_lock);
                lock->l_completion_ast = NULL;
                rc = ldlm_lock_enqueue(ns, &lock, cookie, cookielen, flags,
                                       completion);
                l_unlock(&ns->ns_lock);
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, *flags, NULL);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
 out_req:
        if (!req_passed_in)
                ptlrpc_req_finished(req);
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return rc;
}
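
/*
 * Illustrative sketch of an extent enqueue through ldlm_cli_enqueue().  For
 * LDLM_EXTENT locks the cookie must point at a struct ldlm_extent describing
 * the requested byte range (see the memcpy into lock->l_extent above).  The
 * helper name example_extent_enqueue, the blocking callback parameter and
 * the whole-object range are hypothetical.
 */
#if 0
static int example_extent_enqueue(struct lustre_handle *connh,
                                  struct ldlm_namespace *ns,
                                  struct ldlm_res_id res_id, ldlm_mode_t mode,
                                  ldlm_blocking_callback my_blocking_ast,
                                  struct lustre_handle *lockh)
{
        struct ldlm_extent extent;
        int flags = 0;

        /* Request the whole object; the server may grant a different
         * (page-aligned) range -- see the reply->lock_extent handling
         * above. */
        extent.start = 0;
        extent.end = ~0ULL;

        return ldlm_cli_enqueue(connh, NULL, ns, NULL, res_id, LDLM_EXTENT,
                                &extent, sizeof(extent), mode, &flags,
                                ldlm_completion_ast, my_blocking_ast,
                                NULL, lockh);
}
#endif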

int ldlm_cli_replay_enqueue(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_res_id junk;
        int flags = LDLM_FL_REPLAY;
        ldlm_lock2handle(lock, &lockh);
        return ldlm_cli_enqueue(lock->l_connh, NULL, NULL, NULL, junk,
                                lock->l_resource->lr_type, NULL, 0, -1, &flags,
                                NULL, NULL, NULL, &lockh);
}

static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        ENTRY;
        if (lock->l_resource->lr_namespace->ns_client) {
                CERROR("Trying to cancel local lock\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        ldlm_lock_convert(lock, new_mode, flags);
        ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct lustre_handle *connh;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int rc, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;
        connh = lock->l_connh;

        if (!connh)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_CONVERT, 1, &size,
                              NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR ("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL)
                ldlm_reprocess_all(res);
        /* Go to sleep until the lock is granted. */
        /* FIXME: or cancelled. */
        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}
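
/*
 * Illustrative sketch of a mode conversion on an already granted lock.  The
 * helper name example_convert_lock is hypothetical; deciding when a convert
 * is appropriate is entirely up to the caller.
 */
#if 0
static int example_convert_lock(struct lustre_handle *lockh, int new_mode)
{
        int flags = 0;

        /* On a client lock (l_connh set) this sends an LDLM_CONVERT RPC;
         * otherwise it falls through to ldlm_cli_convert_local(). */
        return ldlm_cli_convert(lockh, new_mode, &flags);
}
#endif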

int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ptlrpc_request *req;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL)
                RETURN(0);

        if (lock->l_connh) {
                int local_only;
                struct obd_import *imp;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references*/
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags & LDLM_FL_LOCAL_ONLY);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                ldlm_cancel_callback(lock);

                if (local_only) {
                        CDEBUG(D_INFO, "not sending request (at caller's "
                               "instruction)\n");
                        goto local_cancel;
                }

                imp = class_conn2cliimp(lock->l_connh);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_HA, "skipping cancel on invalid import %p\n",
                               imp);
                        goto local_cancel;
                }

                req = ptlrpc_prep_req(imp, LDLM_CANCEL, 1, &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
                memcpy(&body->lock_handle1, &lock->l_remote_handle,
                       sizeof(body->lock_handle1));

                req->rq_replen = lustre_msg_size(0, NULL);

                rc = ptlrpc_queue_wait(req);

                if (rc == ESTALE)
                        CERROR("client/server (nid "LPU64") out of sync--not "
                               "fatal\n",
                               req->rq_import->imp_connection->c_peer.peer_nid);
                else if (rc != ELDLM_OK)
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);

                ptlrpc_req_finished(req);
        local_cancel:
                ldlm_lock_cancel(lock);
        } else {
                LDLM_DEBUG(lock, "client-side local cancel");
                if (lock->l_resource->lr_namespace->ns_client) {
                        CERROR("Trying to cancel local lock\n");
                        LBUG();
                }
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "client-side local cancel handler END");
        }

        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        return rc;
}
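
/*
 * Illustrative sketch of cancelling a lock by handle.  The helper name
 * example_cancel_lock is hypothetical.  Note that ldlm_cli_cancel() quietly
 * returns 0 when the handle no longer resolves to a lock, so concurrent
 * cancels of the same handle are harmless (see the comment at the top of
 * ldlm_cli_cancel()).
 */
#if 0
static void example_cancel_lock(struct lustre_handle *lockh)
{
        int rc = ldlm_cli_cancel(lockh);

        if (rc != ELDLM_OK)
                CERROR("ldlm_cli_cancel: %d\n", rc);
}
#endif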

int ldlm_cancel_lru(struct ldlm_namespace *ns)
{
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        int count, rc = 0;
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&ns->ns_lock);
        count = ns->ns_nr_unused - ns->ns_max_unused;

        if (count <= 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        list_for_each_safe(tmp, next, &ns->ns_unused_list) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_lru);

                LASSERT(!lock->l_readers && !lock->l_writers);

                /* Setting the CBPENDING flag is a little misleading, but
                 * prevents an important race; namely, once CBPENDING is set,
                 * the lock can accumulate no more readers/writers.  Since
                 * readers and writers are already zero here, ldlm_lock_decref
                 * won't see this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);
                list_add(&w->w_list, &list);
                ldlm_lock_remove_from_lru(lock);

                if (--count == 0)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                ldlm_lock2handle(w->w_lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc != ELDLM_OK)
                        CDEBUG(D_INFO, "ldlm_cli_cancel: %d\n", rc);

                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        RETURN(rc);
}
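
/*
 * Illustrative sketch of driving the LRU: a caller that notices the
 * namespace holds more than ns_max_unused unused locks asks for the excess
 * (ns_nr_unused - ns_max_unused) to be cancelled.  The helper name
 * example_trim_namespace is hypothetical.
 */
#if 0
static void example_trim_namespace(struct ldlm_namespace *ns)
{
        int rc = ldlm_cancel_lru(ns);

        if (rc != 0)
                CDEBUG(D_INFO, "ldlm_cancel_lru: %d\n", rc);
}
#endif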

static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                           struct ldlm_res_id res_id, int flags,
                                           void *opaque)
{
        struct ldlm_resource *res;
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        struct ldlm_ast_work *w;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id.name[0]);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (opaque != NULL && lock->l_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p res"
                                   LPU64":"LPU64, lock->l_data, opaque,
                                   res_id.name[0], res_id.name[1]);
                        //LBUG();
                        continue;
                }

                if (lock->l_readers || lock->l_writers) {
                        if (flags & LDLM_FL_WARN) {
                                LDLM_ERROR(lock, "lock in use");
                                //LBUG();
                        }
                        continue;
                }

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);

                /* Prevent the cancel callback from being called by setting
                 * LDLM_FL_CANCEL in the lock.  Very sneaky. -p */
                if (flags & LDLM_FL_NO_CALLBACK)
                        w->w_lock->l_flags |= LDLM_FL_CANCEL;

                list_add(&w->w_list, &list);
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        ldlm_lock_cancel(w->w_lock);
                } else {
                        ldlm_lock2handle(w->w_lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc != ELDLM_OK)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                }
                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        ldlm_resource_putref(res);

        RETURN(0);
}

/* Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server.
 * If flags & LDLM_FL_NO_CALLBACK, don't run the cancel callback.
 * If flags & LDLM_FL_WARN, print a warning if some locks are still in use. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id, int flags, void *opaque)
{
        int i;
        ENTRY;

        if (ns == NULL)
                RETURN(ELDLM_OK);

        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, *res_id, flags,
                                                       opaque));

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        int rc;
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags, opaque);

                        if (rc)
                                CERROR("cancel_unused_res ("LPU64"): %d\n",
                                       res->lr_name.name[0], rc);
                        ldlm_resource_putref(res);
                }
        }
        l_unlock(&ns->ns_lock);

        RETURN(ELDLM_OK);
}
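
/*
 * Illustrative sketch: dropping every unused lock in a namespace without
 * telling the server.  A NULL res_id walks the whole namespace, and
 * LDLM_FL_LOCAL_ONLY skips the CANCEL RPCs, as described in the comment
 * above ldlm_cli_cancel_unused().  The helper name example_drop_unused is
 * hypothetical.
 */
#if 0
static int example_drop_unused(struct ldlm_namespace *ns)
{
        return ldlm_cli_cancel_unused(ns, NULL, LDLM_FL_LOCAL_ONLY, NULL);
}
#endif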

/* Lock iterators. */

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int rc = LDLM_ITER_CONTINUE;
        struct ldlm_namespace *ns;

        ENTRY;

        if (!res)
                RETURN(LDLM_ITER_CONTINUE);

        /* Don't touch res->lr_namespace until we know res is non-NULL. */
        ns = res->lr_namespace;
        l_lock(&ns->ns_lock);
        list_for_each_safe(tmp, next, &res->lr_granted) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_converting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_waiting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}

struct iter_helper_data {
        ldlm_iterator_t iter;
        void *closure;
};

static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
        struct iter_helper_data *helper = closure;
        return helper->iter(lock, helper->closure);
}

static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
{
        return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
}

int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure)
{
        struct iter_helper_data helper = { iter: iter, closure: closure };
        return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
}

int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure)
{
        int i, rc = LDLM_ITER_CONTINUE;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        rc = iter(res, closure);
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}
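
/*
 * Illustrative sketch of the iterator interface: count every lock in a
 * namespace.  Both helper names (example_count_cb, example_count_locks) are
 * hypothetical.
 */
#if 0
static int example_count_cb(struct ldlm_lock *lock, void *closure)
{
        int *count = closure;

        (*count)++;
        return LDLM_ITER_CONTINUE;
}

static int example_count_locks(struct ldlm_namespace *ns)
{
        int count = 0;

        ldlm_namespace_foreach(ns, example_count_cb, &count);
        return count;
}
#endif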

/* Lock replay */

static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
        struct list_head *list = closure;

        /* we use l_pending_chain here, because it's unused on clients. */
        list_add(&lock->l_pending_chain, list);
        return LDLM_ITER_CONTINUE;
}

static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size;
        int flags;

        /*
         * If granted mode matches the requested mode, this lock is granted.
         *
         * If they differ, but we have a granted mode, then we were granted
         * one mode and now want another: ergo, converting.
         *
         * If we haven't been granted anything and are on a resource list,
         * then we're blocked/waiting.
         *
         * If we haven't been granted anything and we're NOT on a resource list,
         * then we haven't got a reply yet and don't have a known disposition.
         * This happens whenever a lock enqueue is the request that triggers
         * recovery.
         */
        if (lock->l_granted_mode == lock->l_req_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
        else if (lock->l_granted_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
        else if (!list_empty(&lock->l_res_link))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
        else
                flags = LDLM_FL_REPLAY;

        size = sizeof(*body);
        req = ptlrpc_prep_req(imp, LDLM_ENQUEUE, 1, &size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        /* We're part of recovery, so don't wait for it. */
        req->rq_level = LUSTRE_CONN_RECOVD;

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = flags;

        ldlm_lock2handle(lock, &body->lock_handle1);
        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        LDLM_DEBUG(lock, "replaying lock:");
        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "replayed lock:");
 out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}

int ldlm_replay_locks(struct obd_import *imp)
{
        struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
        struct list_head list, *pos, *next;
        struct ldlm_lock *lock;
        int rc = 0;

        ENTRY;
        INIT_LIST_HEAD(&list);

        l_lock(&ns->ns_lock);
        (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);

        list_for_each_safe(pos, next, &list) {
                lock = list_entry(pos, struct ldlm_lock, l_pending_chain);
                rc = replay_one_lock(imp, lock);
                if (rc)
                        break; /* or try to do the rest? */
        }
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}
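
/*
 * Note: ldlm_replay_locks() is intended to be driven from the client
 * recovery path once the import is being re-established; replay_one_lock()
 * marks its requests LUSTRE_CONN_RECOVD for exactly that reason.  Which part
 * of the recovery code invokes it is outside the scope of this file.
 */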