/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
#include <signal.h>
#include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <linux/obd.h>

static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        int               lwd_generation;
};

int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_device *obd = class_conn2obd(lock->l_connh);

        if (obd == NULL) {
                LDLM_ERROR(lock, "lock timed out; not entering recovery in "
                           "server code, just going back to sleep");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                ptlrpc_fail_import(imp, lwd->lwd_generation);
                LDLM_ERROR(lock, "lock timed out, entering recovery for %s@%s",
                           imp->imp_target_uuid.uuid,
                           imp->imp_connection->c_remote_uuid.uuid);
        }

        RETURN(0);
}

int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        unsigned long irqflags;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        int rc = 0;
        struct l_wait_info lwi;

        obd = class_conn2obd(lock->l_connh);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;

        lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, ldlm_expired_completion_wait,
                               interrupted_completion_wait, &lwd);
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC)
                goto noreproc;

        if (flags == 0) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV)))
                RETURN(0);

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock);
        ldlm_reprocess_all(lock->l_resource);

 noreproc:
        if (imp != NULL) {
                spin_lock_irqsave(&imp->imp_lock, irqflags);
                lwd.lwd_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, irqflags);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           lock->l_destroyed), &lwi);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}
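
/*
 * Sketch of the LDLM_FL_WAIT_NOREPROC path (illustrative): a caller that
 * already has the lock queued can invoke the completion AST directly to
 * block until the lock is granted, skipping the reprocess step.  This is
 * how ldlm_cli_convert() later in this file uses it:
 *
 *      if (lock->l_completion_ast)
 *              lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
 */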

static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                                  struct lustre_handle *parent_lockh,
                                  struct ldlm_res_id res_id,
                                  __u32 type,
                                  void *cookie, int cookielen,
                                  ldlm_mode_t mode,
                                  int *flags,
                                  ldlm_completion_callback completion,
                                  ldlm_blocking_callback blocking,
                                  void *data,
                                  struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        if (ns->ns_client) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, parent_lockh, res_id, type, mode,
                                blocking, data);
        if (!lock)
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock->l_flags |= LDLM_FL_LOCAL;

        err = ldlm_lock_enqueue(ns, &lock, cookie, cookielen, flags,
                                completion);
        if (err != ELDLM_OK)
                GOTO(out, err);

        if (type == LDLM_EXTENT)
                memcpy(cookie, &lock->l_extent, sizeof(lock->l_extent));
        if ((*flags) & LDLM_FL_LOCK_CHANGED)
                memcpy(&res_id, &lock->l_resource->lr_name, sizeof(res_id));

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}
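
/*
 * Client-side enqueue entry point.  With a NULL connection handle the lock
 * is created and enqueued locally via ldlm_cli_enqueue_local(); otherwise an
 * LDLM_ENQUEUE RPC is sent, reusing a caller-supplied request if one was
 * passed in.  LDLM_FL_REPLAY in *flags means an existing lock (found through
 * lockh) is being re-sent during recovery rather than created from scratch.
 */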
int ldlm_cli_enqueue(struct lustre_handle *connh,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct lustre_handle *parent_lock_handle,
                     struct ldlm_res_id res_id,
                     __u32 type,
                     void *cookie, int cookielen,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_completion_callback completion,
                     ldlm_blocking_callback blocking,
                     void *data,
                     struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size = sizeof(*body), req_passed_in = 1, is_replay;
        ENTRY;

        is_replay = *flags & LDLM_FL_REPLAY;
        LASSERT(connh != NULL || !is_replay);

        if (connh == NULL) {
                rc = ldlm_cli_enqueue_local(ns, parent_lock_handle, res_id,
                                            type, cookie, cookielen, mode,
                                            flags, completion, blocking, data,
                                            lockh);
                RETURN(rc);
        }

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(connh == lock->l_connh);
        } else {
                lock = ldlm_lock_create(ns, parent_lock_handle, res_id, type,
                                        mode, blocking, data);
                if (lock == NULL)
                        GOTO(out_nolock, rc = -ENOMEM);
                /* ugh.  I set this early (instead of waiting for _enqueue)
                 * because the completion AST might arrive early, and we need
                 * (in just this one case) to run the completion_cb even if it
                 * arrives before the reply. */
                lock->l_completion_ast = completion;
                LDLM_DEBUG(lock, "client-side enqueue START");
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, mode);
                ldlm_lock2handle(lock, lockh);
                if (type == LDLM_EXTENT)
                        memcpy(&lock->l_extent, cookie,
                               sizeof(body->lock_desc.l_extent));
        }

        if (req == NULL) {
                req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_ENQUEUE, 1,
                                      &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);
                req_passed_in = 0;
        } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
                LBUG();

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;

        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));
        if (parent_lock_handle)
                memcpy(&body->lock_handle2, parent_lock_handle,
                       sizeof(body->lock_handle2));

        /* Continue as normal. */
        if (!req_passed_in) {
                size = sizeof(*reply);
                req->rq_replen = lustre_msg_size(1, &size);
        }
        lock->l_connh = connh;
        lock->l_export = NULL;
        lock->l_blocking_ast = blocking;

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                /* Set a flag to prevent us from sending a CANCEL (bug 407) */
                l_lock(&ns->ns_lock);
                lock->l_flags |= LDLM_FL_LOCAL_ONLY;
                LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
                l_unlock(&ns->ns_lock);

                ldlm_lock_decref_and_cancel(lockh, mode);

                if (rc == ELDLM_LOCK_ABORTED) {
                        /* caller expects reply buffer 0 to have been swabbed */
                        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                                   lustre_swab_ldlm_reply);
                        if (reply == NULL) {
                                CERROR ("Can't unpack ldlm_reply\n");
                                GOTO (out_req, rc = -EPROTO);
                        }
                }
                GOTO(out_req, rc);
        }

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR ("Can't unpack ldlm_reply\n");
                GOTO (out_req, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        *flags = reply->lock_flags;

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: %d\n", lock,
               reply->lock_handle.cookie, *flags);
        if (type == LDLM_EXTENT) {
                CDEBUG(D_INFO, "requested extent: "LPU64" -> "LPU64", got "
                       "extent "LPU64" -> "LPU64"\n",
                       body->lock_desc.l_extent.start,
                       body->lock_desc.l_extent.end,
                       reply->lock_extent.start, reply->lock_extent.end);

                cookie = &reply->lock_extent; /* FIXME bug 267 */
                cookielen = sizeof(reply->lock_extent);
        }

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_mode;
                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (reply->lock_resource_name.name[0] !=
                    lock->l_resource->lr_name.name[0]) {
                        CDEBUG(D_INFO, "remote intent success, locking %ld "
                               "instead of %ld\n",
                               (long)reply->lock_resource_name.name[0],
                               (long)lock->l_resource->lr_name.name[0]);

                        ldlm_lock_change_resource(ns, lock,
                                                  reply->lock_resource_name);
                        if (lock->l_resource == NULL) {
                                LBUG();
                                GOTO(out_req, rc = -ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
        }

        if (!is_replay) {
                l_lock(&ns->ns_lock);
                lock->l_completion_ast = NULL;
                rc = ldlm_lock_enqueue(ns, &lock, cookie, cookielen, flags,
                                       completion);
                l_unlock(&ns->ns_lock);
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, *flags, NULL);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
 out_req:
        if (!req_passed_in)
                ptlrpc_req_finished(req);
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return rc;
}
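
/*
 * Usage sketch (illustrative): enqueue a read-mode extent lock and release
 * it when done.  The caller-side names (connh, ns, res_id, my_blocking_ast)
 * are hypothetical; LCK_PR and struct ldlm_extent are assumed to be the
 * usual definitions from lustre_dlm.h:
 *
 *      struct ldlm_extent ext = { 0, ~0ULL };
 *      struct lustre_handle lockh;
 *      int flags = 0;
 *
 *      rc = ldlm_cli_enqueue(connh, NULL, ns, NULL, res_id, LDLM_EXTENT,
 *                            &ext, sizeof(ext), LCK_PR, &flags,
 *                            ldlm_completion_ast, my_blocking_ast, NULL,
 *                            &lockh);
 *      if (rc == ELDLM_OK) {
 *              ...
 *              ldlm_lock_decref(&lockh, LCK_PR);
 *      }
 */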

int ldlm_cli_replay_enqueue(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_res_id junk;
        int flags = LDLM_FL_REPLAY;
        ldlm_lock2handle(lock, &lockh);
        return ldlm_cli_enqueue(lock->l_connh, NULL, NULL, NULL, junk,
                                lock->l_resource->lr_type, NULL, 0, -1, &flags,
                                NULL, NULL, NULL, &lockh);
}

static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        ENTRY;
        if (lock->l_resource->lr_namespace->ns_client) {
                CERROR("Trying to convert local lock in a shadow namespace\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        ldlm_lock_convert(lock, new_mode, flags);
        ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct lustre_handle *connh;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int rc, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;
        connh = lock->l_connh;

        if (!connh)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_CONVERT, 1, &size,
                              NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR ("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL)
                ldlm_reprocess_all(res);
        /* Go to sleep until the lock is granted. */
        /* FIXME: or cancelled. */
        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}
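
/*
 * Usage sketch (illustrative; assumes a caller holding lockh from an earlier
 * enqueue and the usual LCK_* modes from lustre_dlm.h):
 *
 *      int flags = 0;
 *      rc = ldlm_cli_convert(&lockh, LCK_PR, &flags);
 *
 * A granted LCK_PW lock would be downgraded to LCK_PR here, letting other
 * readers in without dropping the lock entirely.
 */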

int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ptlrpc_request *req;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL)
                RETURN(0);

        if (lock->l_connh) {
                int local_only;
                struct obd_import *imp;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references*/
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags & LDLM_FL_LOCAL_ONLY);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                ldlm_cancel_callback(lock);

                if (local_only) {
                        CDEBUG(D_INFO, "not sending request (at caller's "
                               "instruction)\n");
                        goto local_cancel;
                }

                imp = class_conn2cliimp(lock->l_connh);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_HA, "skipping cancel on invalid import %p\n",
                               imp);
                        goto local_cancel;
                }

                req = ptlrpc_prep_req(imp, LDLM_CANCEL, 1, &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
                memcpy(&body->lock_handle1, &lock->l_remote_handle,
                       sizeof(body->lock_handle1));

                req->rq_replen = lustre_msg_size(0, NULL);

                rc = ptlrpc_queue_wait(req);

                if (rc == ESTALE)
                        CERROR("client/server (nid "LPU64") out of sync--not "
                               "fatal\n",
                               req->rq_import->imp_connection->c_peer.peer_nid);
                else if (rc != ELDLM_OK)
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);

                ptlrpc_req_finished(req);
        local_cancel:
                ldlm_lock_cancel(lock);
        } else {
                if (lock->l_resource->lr_namespace->ns_client) {
                        LDLM_ERROR(lock, "Trying to cancel local lock\n");
                        LBUG();
                }
                LDLM_DEBUG(lock, "client-side local cancel");
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "client-side local cancel handler END");
        }

        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        return rc;
}
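
/*
 * Usage sketch (illustrative; lockh is a handle obtained from an earlier
 * ldlm_cli_enqueue):
 *
 *      rc = ldlm_cli_cancel(&lockh);
 *
 * Callers that still hold a mode reference would normally go through
 * ldlm_lock_decref_and_cancel(&lockh, mode) instead, as the enqueue error
 * path above does.
 */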

int ldlm_cancel_lru(struct ldlm_namespace *ns)
{
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        int count, rc = 0;
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&ns->ns_lock);
        count = ns->ns_nr_unused - ns->ns_max_unused;

        if (count <= 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        list_for_each_safe(tmp, next, &ns->ns_unused_list) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_lru);

                LASSERT(!lock->l_readers && !lock->l_writers);

                /* Setting the CBPENDING flag is a little misleading, but
                 * prevents an important race; namely, once CBPENDING is set,
                 * the lock can accumulate no more readers/writers.  Since
                 * readers and writers are already zero here, ldlm_lock_decref
                 * won't see this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);
                list_add(&w->w_list, &list);
                ldlm_lock_remove_from_lru(lock);

                if (--count == 0)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                ldlm_lock2handle(w->w_lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc != ELDLM_OK)
                        CDEBUG(D_INFO, "ldlm_cli_cancel: %d\n", rc);

                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        RETURN(rc);
}
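
/*
 * Illustrative trigger (not taken from this file): a caller that wants to
 * shrink the client LRU can lower the watermark and then ask for a trim;
 * only the locks above ns_max_unused are cancelled:
 *
 *      ns->ns_max_unused = 20;
 *      ldlm_cancel_lru(ns);
 */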

static int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                           struct ldlm_res_id res_id, int flags,
                                           void *opaque)
{
        struct ldlm_resource *res;
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        struct ldlm_ast_work *w;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id.name[0]);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (opaque != NULL && lock->l_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                   lock->l_data, opaque);
                        //LBUG();
                        continue;
                }

                if (lock->l_readers || lock->l_writers) {
                        if (flags & LDLM_FL_WARN) {
                                LDLM_ERROR(lock, "lock in use");
                                //LBUG();
                        }
                        continue;
                }

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);

                /* Prevent the cancel callback from being called by setting
                 * LDLM_FL_CANCEL in the lock.  Very sneaky. -p */
                if (flags & LDLM_FL_NO_CALLBACK)
                        w->w_lock->l_flags |= LDLM_FL_CANCEL;

                list_add(&w->w_list, &list);
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        ldlm_lock_cancel(w->w_lock);
                } else {
                        ldlm_lock2handle(w->w_lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc != ELDLM_OK)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                }
                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        ldlm_resource_putref(res);

        RETURN(0);
}

/* Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server.
 * If flags & LDLM_FL_NO_CALLBACK, don't run the cancel callback.
 * If flags & LDLM_FL_WARN, print a warning if some locks are still in use. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id, int flags, void *opaque)
{
        int i;
        ENTRY;

        if (ns == NULL)
                RETURN(ELDLM_OK);

        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, *res_id, flags,
                                                       opaque));

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        int rc;
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags, opaque);

                        if (rc)
                                CERROR("cancel_unused_res ("LPU64"): %d\n",
                                       res->lr_name.name[0], rc);
                        ldlm_resource_putref(res);
                }
        }
        l_unlock(&ns->ns_lock);

        RETURN(ELDLM_OK);
}
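
/*
 * Usage sketch (illustrative): drop every unused lock a client holds in a
 * namespace without notifying the server, e.g. while tearing down after the
 * import has already been invalidated:
 *
 *      rc = ldlm_cli_cancel_unused(ns, NULL, LDLM_FL_LOCAL_ONLY, NULL);
 *
 * Passing a res_id restricts the sweep to that one resource, and a non-NULL
 * opaque pointer restricts it to locks whose l_data matches.
 */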

/* Lock iterators. */

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure)
{
        struct list_head *tmp, *next;
        struct ldlm_lock *lock;
        int rc = LDLM_ITER_CONTINUE;
        struct ldlm_namespace *ns;

        ENTRY;

        if (!res)
                RETURN(LDLM_ITER_CONTINUE);

        /* Only dereference the resource once we know it is non-NULL. */
        ns = res->lr_namespace;
        l_lock(&ns->ns_lock);
        list_for_each_safe(tmp, next, &res->lr_granted) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_converting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }

        list_for_each_safe(tmp, next, &res->lr_waiting) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (iter(lock, closure) == LDLM_ITER_STOP)
                        GOTO(out, rc = LDLM_ITER_STOP);
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}

struct iter_helper_data {
        ldlm_iterator_t iter;
        void *closure;
};

static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
        struct iter_helper_data *helper = closure;
        return helper->iter(lock, helper->closure);
}

static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
{
        return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
}

int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure)
{
        struct iter_helper_data helper = { iter: iter, closure: closure };
        return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
}
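
/*
 * Usage sketch (illustrative; count_lock and ns are hypothetical caller-side
 * names):
 *
 *      static int count_lock(struct ldlm_lock *lock, void *closure)
 *      {
 *              (*(int *)closure)++;
 *              return LDLM_ITER_CONTINUE;
 *      }
 *
 *      int n = 0;
 *      ldlm_namespace_foreach(ns, count_lock, &n);
 */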

int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure)
{
        int i, rc = LDLM_ITER_CONTINUE;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        rc = iter(res, closure);
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}

/* non-blocking function to manipulate a lock whose cb_data is being put
 * away. */
void ldlm_change_cbdata(struct ldlm_namespace *ns,
                        struct ldlm_res_id *res_id,
                        ldlm_iterator_t iter,
                        void *data)
{
        struct ldlm_resource *res;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                CERROR("must pass in namespace\n");
                LBUG();
        }

        res = ldlm_resource_get(ns, NULL, *res_id, 0, 0);
        if (res == NULL) {
                EXIT;
                return;
        }

        l_lock(&ns->ns_lock);
        rc = ldlm_resource_foreach(res, iter, data);
        l_unlock(&ns->ns_lock);
        ldlm_resource_putref(res);
        EXIT;
}
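
/*
 * Usage sketch (illustrative; null_if_equal is a hypothetical iterator, not
 * part of this file): before freeing an object cached in l_data, a caller
 * can walk the resource and detach it from any lock still pointing at it:
 *
 *      static int null_if_equal(struct ldlm_lock *lock, void *data)
 *      {
 *              if (lock->l_data == data)
 *                      lock->l_data = NULL;
 *              return LDLM_ITER_CONTINUE;
 *      }
 *
 *      ldlm_change_cbdata(ns, &res_id, null_if_equal, inode);
 */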

/* Lock replay */

static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
        struct list_head *list = closure;

        /* we use l_pending_chain here, because it's unused on clients. */
        list_add(&lock->l_pending_chain, list);
        return LDLM_ITER_CONTINUE;
}

static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size;
        int flags;

        /*
         * If granted mode matches the requested mode, this lock is granted.
         *
         * If they differ, but we have a granted mode, then we were granted
         * one mode and now want another: ergo, converting.
         *
         * If we haven't been granted anything and are on a resource list,
         * then we're blocked/waiting.
         *
         * If we haven't been granted anything and we're NOT on a resource list,
         * then we haven't got a reply yet and don't have a known disposition.
         * This happens whenever a lock enqueue is the request that triggers
         * recovery.
         */
        if (lock->l_granted_mode == lock->l_req_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
        else if (lock->l_granted_mode)
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
        else if (!list_empty(&lock->l_res_link))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
        else
                flags = LDLM_FL_REPLAY;

        size = sizeof(*body);
        req = ptlrpc_prep_req(imp, LDLM_ENQUEUE, 1, &size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        /* We're part of recovery, so don't wait for it. */
        req->rq_level = LUSTRE_CONN_RECOVER;

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = flags;

        ldlm_lock2handle(lock, &body->lock_handle1);
        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        LDLM_DEBUG(lock, "replaying lock:");
        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO (out, rc = -EPROTO);
        }

        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "replayed lock:");
 out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}

int ldlm_replay_locks(struct obd_import *imp)
{
        struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
        struct list_head list, *pos, *next;
        struct ldlm_lock *lock;
        int rc = 0;

        ENTRY;
        INIT_LIST_HEAD(&list);

        l_lock(&ns->ns_lock);
        (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);

        list_for_each_safe(pos, next, &list) {
                lock = list_entry(pos, struct ldlm_lock, l_pending_chain);
                rc = replay_one_lock(imp, lock);
                if (rc)
                        break; /* or try to do the rest? */
        }
        l_unlock(&ns->ns_lock);
        RETURN(rc);
}
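
/*
 * Recovery-time sketch (illustrative): once a failed import reconnects, the
 * recovery path is expected to hand it to ldlm_replay_locks() so that every
 * lock chained above is re-sent with LDLM_FL_REPLAY and the server can
 * rebuild its lock state:
 *
 *      rc = ldlm_replay_locks(imp);
 */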