/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
#include <signal.h>
#include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_class.h>
#include <obd.h>

#include "ldlm_internal.h"

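/* No-op interrupt handler, passed to l_wait_event() via LWI_INTR()/
 * LWI_TIMEOUT_INTR() below; the wait loop itself handles the signal, so
 * nothing needs to be done here. */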
static void interrupted_completion_wait(void *data)
{
}

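/* State shared with the wait callbacks: the lock being waited upon and the
 * import connection count captured before sleeping, so that
 * ldlm_expired_completion_wait() can fail the matching connection only. */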
struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32             lwd_conn_cnt;
};

struct ldlm_async_args {
        struct lustre_handle lock_handle;
};

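/* Timeout handler invoked by l_wait_event() when a lock enqueue has not
 * completed within obd_timeout.  For a lock with no connection export
 * (granted by the local server) it only logs and periodically dumps the
 * namespace; for a client lock it kicks the import into recovery. */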
int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        ENTRY;
        if (lock->l_conn_export == NULL) {
                static cfs_time_t next_dump = 0, last_dump = 0;

                if (ptlrpc_check_suspend())
                        RETURN(0);

                LDLM_ERROR(lock, "lock timed out (enqueued at %lu, %lus ago); "
                           "not entering recovery in server code, just going "
                           "back to sleep", lock->l_enqueued_time.tv_sec,
                           CURRENT_SECONDS - lock->l_enqueued_time.tv_sec);
                if (cfs_time_after(cfs_time_current(), next_dump)) {
                        last_dump = next_dump;
                        next_dump = cfs_time_shift(300);
                        ldlm_namespace_dump(D_DLMTRACE,
                                            lock->l_resource->lr_namespace);
                        if (last_dump == 0)
                                libcfs_debug_dumplog();
                }
                RETURN(0);
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
        LDLM_ERROR(lock, "lock timed out (enqueued at %lu, %lus ago), entering "
                   "recovery for %s@%s", lock->l_enqueued_time.tv_sec,
                   CURRENT_SECONDS - lock->l_enqueued_time.tv_sec,
                   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);

        RETURN(0);
}

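/* Completion AST: if the enqueue returned a blocked lock, sleep until the
 * lock is granted, fails, or the wait is interrupted or times out. */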
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        int rc = 0;
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                goto noreproc;
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock, 0);
        ldlm_reprocess_all(lock->l_resource);

noreproc:

        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;

        if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
                LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
                lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
                spin_unlock(&imp->imp_lock);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           (lock->l_flags & LDLM_FL_FAILED)), &lwi);

        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}

/*
 * ->l_blocking_ast() callback for LDLM locks acquired by server-side OBDs.
 */
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag)
{
        int do_ast;
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        lock_res_and_lock(lock);
        /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
         * that ldlm_blocking_ast is called just before intent_policy method
         * takes the ns_lock, then by the time we get the lock, we might not
         * be the correct blocking function anymore.  So check, and return
         * early, if so. */
        if (lock->l_blocking_ast != ldlm_blocking_ast) {
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);
        unlock_res_and_lock(lock);

        if (do_ast) {
                struct lustre_handle lockh;
                int rc;

                LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc < 0)
                        CERROR("ldlm_cli_cancel: %d\n", rc);
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be "
                           "cancelled later");
        }
        RETURN(0);
}

/*
 * ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
 * comment in filter_intent_policy() on why you may need this.
 */
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp)
{
        /*
         * Returning -ELDLM_NO_LOCK_DATA actually works, but the reason for
         * that is rather subtle: with OST-side locking, it may so happen that
         * _all_ extent locks are held by the OST. If client wants to obtain
         * current file size it calls ll{,u}_glimpse_size(), and (as locks are
         * on the server), dummy glimpse callback fires and does
         * nothing. Client still receives correct file size due to the
         * following fragment in filter_intent_policy():
         *
         * rc = l->l_glimpse_ast(l, NULL); // this will update the LVB
         * if (rc != 0 && res->lr_namespace->ns_lvbo &&
         *     res->lr_namespace->ns_lvbo->lvbo_update) {
         *         res->lr_namespace->ns_lvbo->lvbo_update(res, NULL, 0, 1);
         * }
         *
         * that is, after glimpse_ast() fails, filter_lvbo_update() runs, and
         * returns correct file size to the client.
         */
        return -ELDLM_NO_LOCK_DATA;
}

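/* Enqueue a lock directly in a server-side (non-shadow) namespace, with no
 * RPC involved: create the lock, apply the policy data, and run the
 * completion AST once ldlm_lock_enqueue() returns. */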
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, int *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len, void *lvb_swabber,
                           struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        LASSERT(!(*flags & LDLM_FL_REPLAY));
        if (unlikely(ns_is_client(ns))) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, res_id, type, mode, blocking,
                                completion, glimpse, data, lvb_len);
        if (unlikely(!lock))
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_LOCAL;
        if (*flags & LDLM_FL_ATOMIC_CB)
                lock->l_flags |= LDLM_FL_ATOMIC_CB;
        lock->l_lvb_swabber = lvb_swabber;
        unlock_res_and_lock(lock);
        if (policy != NULL)
                lock->l_policy_data = *policy;
        if (type == LDLM_EXTENT)
                lock->l_req_extent = policy->l_extent;

        err = ldlm_lock_enqueue(ns, &lock, policy, flags);
        if (unlikely(err != ELDLM_OK))
                GOTO(out, err);

        if (policy != NULL)
                *policy = lock->l_policy_data;

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}

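/* Undo a failed enqueue: mark the lock LOCAL_ONLY so that no CANCEL RPC is
 * sent to the server, then drop the reference and cancel locally. */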
static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock,
                                struct lustre_handle *lockh, int mode)
{
        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
        unlock_res_and_lock(lock);
        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");

        ldlm_lock_decref_and_cancel(lockh, mode);

        /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
         *       from llite/file.c/ll_file_flock(). */
        if (lock->l_resource->lr_type == LDLM_FLOCK) {
                ldlm_lock_destroy(lock);
        }
}

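/* Second half of a client enqueue: unpack the server reply, apply any
 * server-side changes of mode, resource, or policy data to the local lock,
 * update the LVB, and finish the enqueue locally. */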
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          int *flags, void *lvb, __u32 lvb_len,
                          void *lvb_swabber, struct lustre_handle *lockh,
                          int rc)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        int is_replay = *flags & LDLM_FL_REPLAY;
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;
        int cleanup_phase = 1;
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        /* ldlm_cli_enqueue is holding a reference on this lock. */
        if (!lock) {
                LASSERT(type == LDLM_FLOCK);
                RETURN(-ENOLCK);
        }

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                if (rc == ELDLM_LOCK_ABORTED) {
                        /* Before we return, swab the reply */
                        reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF,
                                                   sizeof(*reply),
                                                   lustre_swab_ldlm_reply);
                        if (reply == NULL) {
                                CERROR("Can't unpack ldlm_reply\n");
                                rc = -EPROTO;
                        }
                        if (lvb_len) {
                                void *tmplvb;
                                tmplvb = lustre_swab_repbuf(req,
                                                            DLM_REPLY_REC_OFF,
                                                            lvb_len,
                                                            lvb_swabber);
                                if (tmplvb == NULL)
                                        GOTO(cleanup, rc = -EPROTO);
                                if (lvb != NULL)
                                        memcpy(lvb, tmplvb, lvb_len);
                        }
                }
                GOTO(cleanup, rc);
        }

        reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(cleanup, rc = -EPROTO);
        }

        /* lock enqueued on the server */
        cleanup_phase = 0;

        lock_res_and_lock(lock);
        lock->l_remote_handle = reply->lock_handle;
        *flags = reply->lock_flags;
        lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
        /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
         * to wait with no timeout as well */
        lock->l_flags |= reply->lock_flags & LDLM_FL_NO_TIMEOUT;
        unlock_res_and_lock(lock);

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
               lock, reply->lock_handle.cookie, *flags);

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;
                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (memcmp(reply->lock_desc.l_resource.lr_name.name,
                          lock->l_resource->lr_name.name,
                          sizeof(struct ldlm_res_id))) {
                        CDEBUG(D_INFO, "remote intent success, locking "
                                        "(%ld,%ld,%ld) instead of "
                                        "(%ld,%ld,%ld)\n",
                              (long)reply->lock_desc.l_resource.lr_name.name[0],
                              (long)reply->lock_desc.l_resource.lr_name.name[1],
                              (long)reply->lock_desc.l_resource.lr_name.name[2],
                              (long)lock->l_resource->lr_name.name[0],
                              (long)lock->l_resource->lr_name.name[1],
                              (long)lock->l_resource->lr_name.name[2]);

                        ldlm_lock_change_resource(ns, lock,
                                          &reply->lock_desc.l_resource.lr_name);
                        if (lock->l_resource == NULL) {
                                LBUG();
                                GOTO(cleanup, rc = -ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
                if (with_policy)
                        if (!(type == LDLM_IBITS && !(exp->exp_connect_flags &
                                                    OBD_CONNECT_IBITS)))
                                lock->l_policy_data =
                                                 reply->lock_desc.l_policy_data;
                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock, "client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT ||
            /* Cancel extent locks as soon as possible on a liblustre client,
             * because it cannot handle asynchronous ASTs robustly (see
             * bug 7311). */
            (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /* If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one. */
        if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
                void *tmplvb;
                tmplvb = lustre_swab_repbuf(req, DLM_REPLY_REC_OFF, lvb_len,
                                            lvb_swabber);
                if (tmplvb == NULL)
                        GOTO(cleanup, rc = -EPROTO);
                memcpy(lock->l_lvb_data, tmplvb, lvb_len);
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);
                        if (!rc)
                                rc = err;
                        if (rc && type != LDLM_FLOCK) /* bug 9425, bug 10250 */
                                cleanup_phase = 1;
                }
        }

        if (lvb_len && lvb != NULL) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
cleanup:
        if (cleanup_phase == 1 && rc)
                failed_lock_cleanup(ns, lock, lockh, mode);
        /* Put the lock twice; the second reference is held by
         * ldlm_cli_enqueue. */
        LDLM_LOCK_PUT(lock);
        LDLM_LOCK_PUT(lock);
        return rc;
}

/* PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
 * a single page on the send/receive side. XXX: 512 should be changed
 * to a more adequate value. */
static inline int ldlm_req_handles_avail(struct obd_export *exp,
                                         int *size, int bufcount, int off)
{
        int avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512);
        int old_size = size[DLM_LOCKREQ_OFF];

        size[DLM_LOCKREQ_OFF] = sizeof(struct ldlm_request);
        avail -= lustre_msg_size(class_exp2cliimp(exp)->imp_msg_magic,
                                 bufcount, size);
        avail /= sizeof(struct lustre_handle);
        avail += LDLM_LOCKREQ_HANDLES - off;
        size[DLM_LOCKREQ_OFF] = old_size;

        return avail;
}

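/* Number of lock handles that fit into a minimal (2-buffer) CANCEL request,
 * i.e. one carrying nothing beyond the ldlm_request itself. */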
static inline int ldlm_cancel_handles_avail(struct obd_export *exp)
{
        int size[2] = { sizeof(struct ptlrpc_body),
                        sizeof(struct ldlm_request) };
        return ldlm_req_handles_avail(exp, size, 2, 0);
}

/* Cancel lru locks and pack them into the enqueue request. Also pack the
 * given @count locks from @cancels into it. */
struct ptlrpc_request *ldlm_prep_enqueue_req(struct obd_export *exp,
                                             int bufcount, int *size,
                                             struct list_head *cancels,
                                             int count)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_request *dlm = NULL;
        struct ptlrpc_request *req;
        CFS_LIST_HEAD(head);
        ENTRY;

        if (cancels == NULL)
                cancels = &head;
        if (exp_connect_cancelset(exp)) {
                /* Estimate the amount of available space in the request. */
                int avail = ldlm_req_handles_avail(exp, size, bufcount,
                                                   LDLM_ENQUEUE_CANCEL_OFF);
                int flags, cancel;

                LASSERT(avail >= count);

                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                cancel = ns_connect_lru_resize(ns) ? 0 : 1;

                /* Cancel lru locks here _only_ if the server supports
                 * EARLY_CANCEL. Otherwise we would have to send an extra
                 * CANCEL rpc right at enqueue time, which is slower than an
                 * asynchronous rpc sent from the blocking thread. */
                count += ldlm_cancel_lru_local(ns, cancels, cancel,
                                               avail - count, 0, flags);
                size[DLM_LOCKREQ_OFF] =
                        ldlm_request_bufsize(count, LDLM_ENQUEUE);
        }
        req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
                              LDLM_ENQUEUE, bufcount, size, NULL);
        if (exp_connect_cancelset(exp) && req) {
                dlm = lustre_msg_buf(req->rq_reqmsg,
                                     DLM_LOCKREQ_OFF, sizeof(*dlm));
                /* Skip the first lock handle in ldlm_request_pack(); this
                 * method will increment @lock_count by the number of lock
                 * handles actually written to the buffer. */
                dlm->lock_count = LDLM_ENQUEUE_CANCEL_OFF;
                ldlm_cli_cancel_list(cancels, count, req, DLM_LOCKREQ_OFF, 0);
        } else {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
        }
        RETURN(req);
}

/* If a request has some specific initialisation it is passed in @reqp,
 * otherwise it is created in ldlm_cli_enqueue.
 *
 * Supports sync and async requests; pass the @async flag accordingly. If a
 * request was created in ldlm_cli_enqueue and it is an async request, it is
 * passed to the caller in @reqp. */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t *policy, int *flags,
                     void *lvb, __u32 lvb_len, void *lvb_swabber,
                     struct lustre_handle *lockh, int async)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
                        [DLM_LOCKREQ_OFF]     = sizeof(*body),
                        [DLM_REPLY_REC_OFF]   = lvb_len };
        int is_replay = *flags & LDLM_FL_REPLAY;
        int req_passed_in = 1, rc, err;
        struct ptlrpc_request *req;
        ENTRY;

        LASSERT(exp != NULL);

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LASSERT(lock != NULL);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                        einfo->ei_mode, einfo->ei_cb_bl,
                                        einfo->ei_cb_cp, einfo->ei_cb_gl,
                                        einfo->ei_cbdata, lvb_len);
                if (lock == NULL)
                        RETURN(-ENOMEM);
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, einfo->ei_mode);
                ldlm_lock2handle(lock, lockh);
                lock->l_lvb_swabber = lvb_swabber;
                if (policy != NULL) {
                        /* INODEBITS_INTEROP: If the server does not support
                         * inodebits, we will request a plain lock in the
                         * descriptor (ldlm_lock2desc() below) but use an
                         * inodebits lock internally with both bits set.
                         */
                        if (einfo->ei_type == LDLM_IBITS &&
                            !(exp->exp_connect_flags & OBD_CONNECT_IBITS))
                                lock->l_policy_data.l_inodebits.bits =
                                        MDS_INODELOCK_LOOKUP |
                                        MDS_INODELOCK_UPDATE;
                        else
                                lock->l_policy_data = *policy;
                }

                if (einfo->ei_type == LDLM_EXTENT)
                        lock->l_req_extent = policy->l_extent;
                LDLM_DEBUG(lock, "client-side enqueue START");
        }

        /* lock not sent to server yet */

        if (reqp == NULL || *reqp == NULL) {
                req = ldlm_prep_enqueue_req(exp, 2, size, NULL, 0);
                if (req == NULL) {
                        failed_lock_cleanup(ns, lock, lockh, einfo->ei_mode);
                        LDLM_LOCK_PUT(lock);
                        RETURN(-ENOMEM);
                }
                req_passed_in = 0;
                if (reqp)
                        *reqp = req;
        } else {
                req = *reqp;
                LASSERTF(lustre_msg_buflen(req->rq_reqmsg, DLM_LOCKREQ_OFF) >=
                         sizeof(*body), "buflen[%d] = %d, not "LPSZ"\n",
                         DLM_LOCKREQ_OFF,
                         lustre_msg_buflen(req->rq_reqmsg, DLM_LOCKREQ_OFF),
                         sizeof(*body));
        }

        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = einfo->ei_cb_bl;

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;
        body->lock_handle[0] = *lockh;

        /* Continue as normal. */
        if (!req_passed_in) {
                size[DLM_LOCKREPLY_OFF] = sizeof(*reply);
                ptlrpc_req_set_repsize(req, 2 + (lvb_len > 0), size);
        }

        /*
         * Liblustre client doesn't get extent locks, except for O_APPEND case
         * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
         * [i_size, OBD_OBJECT_EOF] lock is taken.
         */
        LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
                     policy->l_extent.end == OBD_OBJECT_EOF));

        if (async) {
                LASSERT(reqp != NULL);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);
        err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
                                    einfo->ei_mode, flags, lvb, lvb_len,
                                    lvb_swabber, lockh, rc);

        /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
         * the one reference that we took. */
        if (err == -ENOLCK)
                LDLM_LOCK_PUT(lock);
        else
                rc = err;

        if (!req_passed_in && req != NULL) {
                ptlrpc_req_finished(req);
                if (reqp)
                        *reqp = NULL;
        }

        RETURN(rc);
}

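/* Convert a server-side (local) lock without an RPC, and reprocess the
 * resource so that newly compatible waiters can be granted. */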
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        struct ldlm_resource *res;
        int rc;
        ENTRY;
        if (ns_is_client(lock->l_resource->lr_namespace)) {
                CERROR("Trying to cancel local lock\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        res = ldlm_lock_convert(lock, new_mode, flags);
        if (res) {
                ldlm_reprocess_all(res);
                rc = 0;
        } else {
                rc = EDEADLOCK;
        }
        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(rc);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
/* Caller of this code is supposed to take care of lock readers/writers
   accounting */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
                        [DLM_LOCKREQ_OFF]     = sizeof(*body) };
        int rc;
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;

        if (lock->l_conn_export == NULL)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_exp2cliimp(lock->l_conn_export),
                              LUSTRE_DLM_VERSION, LDLM_CONVERT, 2, size, NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
        body->lock_handle[0] = lock->l_remote_handle;

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size[DLM_LOCKREPLY_OFF] = sizeof(*reply);
        ptlrpc_req_set_repsize(req, 2, size);

        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF, sizeof(*reply),
                                   lustre_swab_ldlm_reply);
        if (reply == NULL) {
                CERROR("Can't unpack ldlm_reply\n");
                GOTO(out, rc = -EPROTO);
        }

        if (req->rq_status)
                GOTO(out, rc = req->rq_status);

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL) {
                ldlm_reprocess_all(res);
                /* Go to sleep until the lock is granted. */
                /* FIXME: or cancelled. */
                if (lock->l_completion_ast) {
                        rc = lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
                                                    NULL);
                        if (rc)
                                GOTO(out, rc);
                }
        } else {
                rc = EDEADLOCK;
        }
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}

/* Cancel locks locally.
 * Returns:
 * LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL rpc to the server;
 * LDLM_FL_CANCELING otherwise;
 * LDLM_FL_BL_AST if there is a need for a separate CANCEL rpc. */
static int ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
        int rc = LDLM_FL_LOCAL_ONLY;
        ENTRY;

        if (lock->l_conn_export) {
                int local_only;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new
                 * references. */
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags &
                              (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
                ldlm_cancel_callback(lock);
                rc = (lock->l_flags & LDLM_FL_BL_AST) ?
                        LDLM_FL_BL_AST : LDLM_FL_CANCELING;
                unlock_res_and_lock(lock);

                if (local_only) {
                        CDEBUG(D_DLMTRACE, "not sending request (at caller's "
                               "instruction)\n");
                        rc = LDLM_FL_LOCAL_ONLY;
                }
                ldlm_lock_cancel(lock);
        } else {
                if (ns_is_client(lock->l_resource->lr_namespace)) {
                        LDLM_ERROR(lock, "Trying to cancel local lock");
                        LBUG();
                }
                LDLM_DEBUG(lock, "server-side local cancel");
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side local cancel handler END");
        }

        RETURN(rc);
}

/* Pack @count locks in @head into the ldlm_request buffer at offset @off
 * of request @req. */
static void ldlm_cancel_pack(struct ptlrpc_request *req, int off,
                             struct list_head *head, int count)
{
        struct ldlm_request *dlm;
        struct ldlm_lock *lock;
        int max, packed = 0;
        ENTRY;

        dlm = lustre_msg_buf(req->rq_reqmsg, off, sizeof(*dlm));
        LASSERT(dlm != NULL);

        /* Check the room in the request buffer. */
        max = lustre_msg_buflen(req->rq_reqmsg, off) -
                sizeof(struct ldlm_request);
        max /= sizeof(struct lustre_handle);
        max += LDLM_LOCKREQ_HANDLES;
        LASSERT(max >= dlm->lock_count + count);

        /* XXX: it would be better to pack lock handles grouped by resource,
         * so that the server-side cancel would call filter_lvbo_update()
         * less frequently. */
        list_for_each_entry(lock, head, l_bl_ast) {
                if (!count--)
                        break;
                LASSERT(lock->l_conn_export);
                /* Pack the lock handle into the given request buffer. */
                LDLM_DEBUG(lock, "packing");
                dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
                packed++;
        }
        CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
        EXIT;
}

/* Prepare and send a batched cancel rpc; it will include @count lock
 * handles of the locks given in @cancels. */
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
                        int count, int flags)
{
        struct ptlrpc_request *req = NULL;
        struct ldlm_request *body;
        int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
                        [DLM_LOCKREQ_OFF]     = sizeof(*body) };
        struct obd_import *imp;
        int free, sent = 0;
        int rc = 0;
        ENTRY;

        LASSERT(exp != NULL);
        LASSERT(count > 0);

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
                RETURN(count);

        free = ldlm_req_handles_avail(exp, size, 2, 0);
        if (count > free)
                count = free;

        size[DLM_LOCKREQ_OFF] = ldlm_request_bufsize(count, LDLM_CANCEL);
        while (1) {
                imp = class_exp2cliimp(exp);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_DLMTRACE,
                               "skipping cancel on invalid import %p\n", imp);
                        RETURN(count);
                }

                req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_CANCEL, 2,
                                      size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);

                req->rq_no_resend = 1;
                req->rq_no_delay = 1;

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF,
                                      sizeof(*body));
                ldlm_cancel_pack(req, DLM_LOCKREQ_OFF, cancels, count);

                ptlrpc_req_set_repsize(req, 1, NULL);
                if (flags & LDLM_FL_ASYNC) {
                        ptlrpcd_add_req(req);
                        sent = count;
                        GOTO(out, 0);
                } else {
                        rc = ptlrpc_queue_wait(req);
                }
                if (rc == ESTALE) {
                        CDEBUG(D_DLMTRACE, "client/server (nid %s) "
                               "out of sync -- not fatal\n",
                               libcfs_nid2str(req->rq_import->
                                              imp_connection->c_peer.nid));
                        rc = 0;
                } else if (rc == -ETIMEDOUT && /* check there was no reconnect*/
                           req->rq_import_generation == imp->imp_generation) {
                        ptlrpc_req_finished(req);
                        continue;
                } else if (rc != ELDLM_OK) {
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);
                        break;
                }
                sent = count;
                break;
        }

        ptlrpc_req_finished(req);
        EXIT;
out:
        return sent ? sent : rc;
}

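/* Map an import onto the LDLM pool of its obd's namespace. */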
static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
        LASSERT(imp != NULL);
        return &imp->imp_obd->obd_namespace->ns_pool;
}

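/* Called on each server reply when LRU resize is negotiated: copy the
 * server lock volume (SLV) and lock limit from the reply into the
 * client-side pool, waking the pools thread early on large SLV drops. */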
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
        struct ldlm_pool *pl;
        ENTRY;

        if (!imp_connect_lru_resize(req->rq_import))
                RETURN(0);

        if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
            lustre_msg_get_limit(req->rq_repmsg) == 0)
                RETURN(0);

        pl = ldlm_imp2pl(req->rq_import);

        spin_lock(&pl->pl_lock);

        /* Check if we need to wake up the pools thread for a fast SLV change.
         * This is only done when the threads' period is noticeably long,
         * i.e. 10s or more. */
#if defined(__KERNEL__) && (LDLM_POOLS_THREAD_PERIOD >= 10)
        {
                __u64 old_slv, new_slv, fast_change;

                old_slv = ldlm_pool_get_slv(pl);
                new_slv = lustre_msg_get_slv(req->rq_repmsg);
                fast_change = old_slv * LDLM_POOLS_FAST_SLV_CHANGE;
                do_div(fast_change, 100);

                /* Wake up the pools thread only if the SLV has changed by
                 * more than 50% since the last update. In that case we want
                 * to react asap. Otherwise there is no sense in waking up the
                 * pools, as they are re-calculated every
                 * LDLM_POOLS_THREAD_PERIOD anyway. */
                if (old_slv > new_slv && old_slv - new_slv > fast_change)
                        ldlm_pools_wakeup();
        }
#endif
        /* In some cases the RPC may contain SLV and limit zeroed out. This is
         * the case when the server does not support the lru resize feature.
         * It is also possible in some recovery cases, when server-side reqs
         * have no ref to the obd export and thus access to the server-side
         * namespace is not possible. */
        if (lustre_msg_get_slv(req->rq_repmsg) != 0 &&
            lustre_msg_get_limit(req->rq_repmsg) != 0) {
                ldlm_pool_set_slv(pl, lustre_msg_get_slv(req->rq_repmsg));
                ldlm_pool_set_limit(pl, lustre_msg_get_limit(req->rq_repmsg));
        } else {
                DEBUG_REQ(D_HA, req, "zero SLV or Limit found "
                          "(SLV: "LPU64", Limit: %u)",
                          lustre_msg_get_slv(req->rq_repmsg),
                          lustre_msg_get_limit(req->rq_repmsg));
        }
        spin_unlock(&pl->pl_lock);

        RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_update_pool);

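/* Client entry point for cancelling a single lock by handle. The lock is
 * cancelled locally first; if the server must be told, as many other unused
 * LRU locks as fit are piggy-backed into the same CANCEL rpc. */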
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        int avail, flags, count = 1, rc = 0;
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL) {
                LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
                RETURN(0);
        }

        rc = ldlm_cli_cancel_local(lock);
        if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
                LDLM_LOCK_PUT(lock);
                RETURN(rc < 0 ? rc : 0);
        }
        /* Even if the lock is marked as LDLM_FL_BL_AST, this is an LDLM_CANCEL
         * rpc which goes to the canceld portal, so we can cancel other lru
         * locks here and send them all in one LDLM_CANCEL rpc. */
        LASSERT(list_empty(&lock->l_bl_ast));
        list_add(&lock->l_bl_ast, &cancels);
        avail = ldlm_cancel_handles_avail(lock->l_conn_export);
        LASSERT(avail > 0);

        ns = lock->l_resource->lr_namespace;
        flags = ns_connect_lru_resize(ns) ? LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
        count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - count,
                                       LDLM_FL_BL_AST, flags);
        ldlm_cli_cancel_list(&cancels, count, NULL, 0, 0);
        RETURN(0);
}

/* XXX: until we have compound requests and can cut cancels from a generic
 * rpc, we need to send cancels with the LDLM_FL_BL_AST flag in a separate
 * rpc. */
static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
{
        CFS_LIST_HEAD(head);
        struct ldlm_lock *lock, *next;
        int left = 0, bl_ast = 0, rc;

        left = count;
        list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                if (left-- == 0)
                        break;

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        rc = LDLM_FL_LOCAL_ONLY;
                        ldlm_lock_cancel(lock);
                } else {
                        rc = ldlm_cli_cancel_local(lock);
                }
                if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                        LDLM_DEBUG(lock, "Cancel lock separately");
                        list_del_init(&lock->l_bl_ast);
                        list_add(&lock->l_bl_ast, &head);
                        bl_ast++;
                        continue;
                }
                if (rc == LDLM_FL_LOCAL_ONLY) {
                        /* CANCEL RPC should not be sent to server. */
                        list_del_init(&lock->l_bl_ast);
                        LDLM_LOCK_PUT(lock);
                        count--;
                }

        }
        if (bl_ast > 0) {
                count -= bl_ast;
                ldlm_cli_cancel_list(&head, bl_ast, NULL, 0, 0);
        }

        RETURN(count);
}

/* Return 1 if @lock should be canceled according to the shrinker policy.
 * Return zero otherwise. */
static int ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
                                     struct ldlm_lock *lock,
                                     int unused, int added,
                                     int asked)
{
        int lock_cost;
        __u64 page_nr;

        if (lock->l_resource->lr_type == LDLM_EXTENT) {
                struct ldlm_extent *l_extent;

                /* For all extent locks the cost is 1 + the number of pages in
                 * their extent. */
                l_extent = &lock->l_policy_data.l_extent;
                page_nr = (l_extent->end - l_extent->start);
                do_div(page_nr, CFS_PAGE_SIZE);

#ifdef __KERNEL__
                /* XXX: In fact this is an evil hack; we can't access the
                 * inode here. To do this right we would need some way to get
                 * the number of pages covered by the lock. This should be
                 * fixed later when 10718 is landed. */
                if (lock->l_ast_data != NULL) {
                        struct inode *inode = lock->l_ast_data;
                        if (page_nr > inode->i_mapping->nrpages)
                                page_nr = inode->i_mapping->nrpages;
                }
#endif
                lock_cost = 1 + page_nr;
        } else {
                /* For all locks which are not extent ones the cost is 1 */
                lock_cost = 1;
        }

        /* Keep all expensive locks in lru for the memory pressure time
         * cancel policy. They may anyway be canceled by the lru resize
         * policy if they do not have a small enough CLV. */
        return (lock_cost <= ns->ns_shrink_thumb);
}

/* Return 1 if @lock should be canceled according to the lru resize policy.
 * Return zero otherwise. */
static int ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
                                   struct ldlm_lock *lock,
                                   int unused, int added,
                                   int asked)
{
        cfs_time_t cur = cfs_time_current();
        struct ldlm_pool *pl = &ns->ns_pool;
        __u64 slv, lvf, lv;
        cfs_time_t la;

        spin_lock(&pl->pl_lock);
        slv = ldlm_pool_get_slv(pl);
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        spin_unlock(&pl->pl_lock);

        la = cfs_duration_sec(cfs_time_sub(cur,
                              lock->l_last_used));

        /* Stop when the SLV has not yet come from the server, or when the
         * lock volume is smaller than the SLV. */
        lv = lvf * la * unused;
        return (slv > 1 && lv >= slv);
}

/* Return 1 if @lock should be canceled according to the passed policy.
 * Return zero otherwise. */
static int ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
                                     struct ldlm_lock *lock,
                                     int unused, int added,
                                     int asked)
{
        /* Do nothing here; we allow canceling all locks which are passed
         * here from the upper layer logic, so the number of locks to be
         * canceled will be limited by @count and @max in
         * ldlm_cancel_lru_local(). */
        return 1;
}

/* Return 1 if @lock should be canceled according to the aged policy.
 * Return zero otherwise. */
static int ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
                                   struct ldlm_lock *lock,
                                   int unused, int added,
                                   int asked)
{
        /* Cancel locks until the asked limit is reached; past that limit,
         * cancel only locks older than ns_max_age. */
        return !((added >= asked) &&
                 cfs_time_before_64(cfs_time_current(),
                                    cfs_time_add(lock->l_last_used,
                                                 ns->ns_max_age)));
}

typedef int (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
                                        struct ldlm_lock *, int,
                                        int, int);

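/* Select the lru cancellation policy matching the connection flags; returns
 * NULL when no policy applies, in which case the caller cancels locks
 * unconditionally up to its @count/@max limits. */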
static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
        if (ns_connect_lru_resize(ns)) {
                if (flags & LDLM_CANCEL_SHRINK)
                        return ldlm_cancel_shrink_policy;
                else if (flags & LDLM_CANCEL_LRUR)
                        return ldlm_cancel_lrur_policy;
                else if (flags & LDLM_CANCEL_PASSED)
                        return ldlm_cancel_passed_policy;
        } else {
                if (flags & LDLM_CANCEL_AGED)
                        return ldlm_cancel_aged_policy;
        }
        return NULL;
}

/* - Free space in lru for @count new locks,
 *   redundant unused locks are canceled locally;
 * - also cancel locally unused aged locks;
 * - do not cancel more than @max locks;
 * - GET the found locks and add them into the @cancels list.
 *
 * A client lock can be added to the l_bl_ast list only when it is
 * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing CANCEL.
 * There are the following use cases: ldlm_cancel_resource_local(),
 * ldlm_cancel_lru_local() and ldlm_cli_cancel(), which check&set this
 * flag properly. As any attempt to cancel a lock relies on this flag,
 * the l_bl_ast list is accessed later without any special locking.
 *
 * Calling policies for enabled lru resize:
 * ----------------------------------------
 * flags & LDLM_CANCEL_LRUR - use lru resize policy (SLV from server) to
 *                            cancel not more than @count locks;
 *
 * flags & LDLM_CANCEL_PASSED - cancel @count number of old locks (located at
 *                              the beginning of lru list);
 *
 * flags & LDLM_CANCEL_SHRINK - cancel not more than @count locks according to
 *                              the memory pressure policy function.
 */
1233 int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
1234                           int count, int max, int cancel_flags, int flags)
1235 {
1236         ldlm_cancel_lru_policy_t cancel_lru_policy_func;
1237         int added = 0, unused, cancel;
1238         struct ldlm_lock *lock, *next;
1239         ENTRY;
1240
1241         spin_lock(&ns->ns_unused_lock);
1242         unused = ns->ns_nr_unused;
1243
1244         if (!ns_connect_lru_resize(ns))
1245                 count += unused - ns->ns_max_unused;
1246
1247         cancel_lru_policy_func = ldlm_cancel_lru_policy(ns, flags);
1248      
1249         list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
1250                 /* Make sure that we skip locks being already in cancel. */
1251                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
1252                     (lock->l_flags & LDLM_FL_BL_AST))
1253                         continue;
1254
1255                 /* For any flags, stop scanning if @max or passed @count is
1256                  * reached. */
1257                 if ((max && added >= max) || (count && added >= count))
1258                         break;
1259
1260                 /* Pass the lock through the policy filter and see if it
1261                  * should stay in lru. */
1262                 if (cancel_lru_policy_func != NULL) {
1263                         cancel = cancel_lru_policy_func(ns, lock, unused, 
1264                                                         added, count);
1265                      
1266                         /* Take next lock for shrink policy, we need to check
1267                          * whole list. Stop scanning for other policies. */
1268                         if ((flags & LDLM_CANCEL_SHRINK) && !cancel)
1269                                 continue;
1270                         else if (!cancel)
1271                                 break;
1272                 }
1273
1274                 if (cancels != NULL) {
1275                         LDLM_LOCK_GET(lock); /* dropped by bl thread */
1276                         spin_unlock(&ns->ns_unused_lock);
1277
1278                         lock_res_and_lock(lock);
1279                         /* Check flags again under the lock. */
1280                         if ((lock->l_flags & LDLM_FL_CANCELING) ||
1281                             (lock->l_flags & LDLM_FL_BL_AST) ||
1282                             (ldlm_lock_remove_from_lru(lock) == 0)) {
1283                                 /* other thread is removing lock from lru or
1284                                  * somebody is already doing CANCEL or
1285                                  * there is a blocking request which will send
1286                                  * cancel by itseft. */
1287                                 unlock_res_and_lock(lock);
1288                                 LDLM_LOCK_PUT(lock);
1289                                 spin_lock(&ns->ns_unused_lock);
1290                                 continue;
1291                         }
1292                         LASSERT(!lock->l_readers && !lock->l_writers);
1293
1294                         /* Since we have chosen to cancel this lock
1295                          * voluntarily, we had better notify the server so
1296                          * that it frees the appropriate state. This might
1297                          * lead to a race where the server silently cancels
1298                          * this lock while we are cancelling it here. */
1299                         lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
1300
1301                         /* Setting the CBPENDING flag is a little misleading,
1302                          * but prevents an important race; namely, once
1303                          * CBPENDING is set, the lock can accumulate no more
1304                          * readers/writers. Since readers and writers are
1305                          * already zero here, ldlm_lock_decref() won't see
1306                          * this flag and call l_blocking_ast(). */
1307                         lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
1308
1309                         /* We can't re-add to l_lru as it confuses the
1310                          * refcounting in ldlm_lock_remove_from_lru() if an AST
1311                          * arrives after we drop ns_lock below. We use l_bl_ast
1312                          * instead; we can't use l_pending_chain as it is
1313                          * used on both server and client, even though bug
1314                          * 5666 says it is used only on the server. */
1315                         LASSERT(list_empty(&lock->l_bl_ast));
1316                         list_add(&lock->l_bl_ast, cancels);
1317                         unlock_res_and_lock(lock);
1318                         spin_lock(&ns->ns_unused_lock);
1319                 }
1320                 added++;
1321                 unused--;
1322         }
1323         spin_unlock(&ns->ns_unused_lock);
1324
1325         if (cancels == NULL)
1326                 RETURN(added);
1327
1328         RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
1329 }
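/*
 * Editor's note: a minimal sketch of a cancel policy with the shape
 * ldlm_cancel_lru_local() calls above -- (ns, lock, unused, added,
 * count), where a non-zero return selects the lock for cancellation
 * and zero stops the scan (or merely skips the lock under
 * LDLM_CANCEL_SHRINK).  The ten-second threshold and the reliance on
 * l_last_used are illustrative assumptions, not part of this file.
 */
static int example_aged_policy(struct ldlm_namespace *ns,
                               struct ldlm_lock *lock, int unused,
                               int added, int count)
{
        /* keep locks that were used within the last ten seconds */
        if (cfs_time_before(cfs_time_current(),
                            cfs_time_add(lock->l_last_used,
                                         cfs_time_seconds(10))))
                return 0;
        return 1;
}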
1330
1331 /* When called with LDLM_ASYNC, the blocking callback is handled in a
1332  * thread and this function returns after the thread has been asked to
1333  * call the callback.  When called with LDLM_SYNC, the blocking
1334  * callback is performed in this function. */
1335 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
1336                     int flags)
1337 {
1338         CFS_LIST_HEAD(cancels);
1339         int count, rc;
1340         ENTRY;
1341
1342 #ifndef __KERNEL__
1343         sync = LDLM_SYNC; /* force to be sync in user space */
1344 #endif
1345         count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
1346         if (sync == LDLM_ASYNC) {
1347                 rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count);
1348                 if (rc == 0)
1349                         RETURN(count);
1350         }
1351
1352         /* If an error occurred in ASYNC mode, or
1353          * this is SYNC mode, cancel the list. */
1354         ldlm_cli_cancel_list(&cancels, count, NULL, 0, 0);
1355         RETURN(count);
1356 }
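/*
 * Editor's note: a hedged usage sketch of ldlm_cancel_lru() -- how a
 * memory-pressure path might ask for @nr unused locks to be cancelled
 * without blocking; the wrapper name is illustrative.
 */
static int example_trim_lru(struct ldlm_namespace *ns, int nr)
{
        /* returns the number of locks gathered; blocking callbacks are
         * handed to the bl thread, with a synchronous fallback on error
         * as implemented above */
        return ldlm_cancel_lru(ns, nr, LDLM_ASYNC, LDLM_CANCEL_SHRINK);
}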
1357
1358 /* Find and cancel locally unused locks on the resource that match the
1359  * given policy and mode. GET the found locks and add them to the
1360  * @cancels list. */
1361 int ldlm_cancel_resource_local(struct ldlm_resource *res,
1362                                struct list_head *cancels,
1363                                ldlm_policy_data_t *policy,
1364                                ldlm_mode_t mode, int lock_flags,
1365                                int cancel_flags, void *opaque)
1366 {
1367         struct ldlm_lock *lock;
1368         int count = 0;
1369         ENTRY;
1370
1371         lock_res(res);
1372         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
1373                 if (opaque != NULL && lock->l_ast_data != opaque) {
1374                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
1375                                    lock->l_ast_data, opaque);
1376                         //LBUG();
1377                         continue;
1378                 }
1379
1380                 if (lock->l_readers || lock->l_writers) {
1381                         if (cancel_flags & LDLM_FL_WARN) {
1382                                 LDLM_ERROR(lock, "lock in use");
1383                                 //LBUG();
1384                         }
1385                         continue;
1386                 }
1387
1388                 /* If somebody is already doing CANCEL, or a blocking AST
1389                  * came, skip this lock. */
1390                 if (lock->l_flags & LDLM_FL_BL_AST ||
1391                     lock->l_flags & LDLM_FL_CANCELING)
1392                         continue;
1393
1394                 if (lockmode_compat(lock->l_granted_mode, mode))
1395                         continue;
1396
1397                 /* If a policy is given and this is an IBITS lock, add to
1398                  * the list only those locks that match the policy. */
1399                 if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
1400                     !(lock->l_policy_data.l_inodebits.bits &
1401                       policy->l_inodebits.bits))
1402                         continue;
1403
1404                 /* See the CBPENDING comment in ldlm_cancel_lru_local() */
1405                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
1406                                  lock_flags;
1407
1408                 LASSERT(list_empty(&lock->l_bl_ast));
1409                 list_add(&lock->l_bl_ast, cancels);
1410                 LDLM_LOCK_GET(lock);
1411                 count++;
1412         }
1413         unlock_res(res);
1414
1415         RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
1416 }
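/*
 * Editor's note: a hedged sketch of the IBITS policy matching above --
 * gather only unused inodebits locks whose bits overlap @bits.  The
 * wrapper name is illustrative; LCK_MINMODE is compatible with nothing,
 * so the inodebits test alone decides what is collected.
 */
static int example_cancel_ibits_local(struct ldlm_resource *res,
                                      struct list_head *cancels,
                                      __u64 bits)
{
        ldlm_policy_data_t policy;

        policy.l_inodebits.bits = bits;
        /* no extra lock flags, default cancel flags, no opaque filter */
        return ldlm_cancel_resource_local(res, cancels, &policy,
                                          LCK_MINMODE, 0, 0, NULL);
}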
1417
1418 /* If @req is NULL, send a CANCEL request to the server with the handles
1419  * of the locks in @cancels. If EARLY_CANCEL is not supported, send
1420  * CANCEL requests separately per lock.
1421  * If @req is not NULL, put the handles of the locks in @cancels into the
1422  * request buffer at offset @off.
1423  * Destroy @cancels at the end. */
1424 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
1425                          struct ptlrpc_request *req, int off, int flags)
1426 {
1427         struct ldlm_lock *lock;
1428         int res = 0;
1429         ENTRY;
1430
1431         if (list_empty(cancels) || count == 0)
1432                 RETURN(0);
1433
1434         /* XXX: requests (both batched and not) could be sent in parallel.
1435          * Usually it is enough to have just one RPC, but it is possible
1436          * that there are too many locks to be cancelled in the LRU or on a
1437          * resource. It would also speed up the case when the server does
1438          * not support the feature. */
1439         while (count > 0) {
1440                 LASSERT(!list_empty(cancels));
1441                 lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
1442                 LASSERT(lock->l_conn_export);
1443
1444                 if (exp_connect_cancelset(lock->l_conn_export)) {
1445                         res = count;
1446                         if (req)
1447                                 ldlm_cancel_pack(req, off, cancels, count);
1448                         else
1449                                 res = ldlm_cli_cancel_req(lock->l_conn_export,
1450                                                           cancels, count,
1451                                                           flags);
1452                 } else {
1453                         res = ldlm_cli_cancel_req(lock->l_conn_export,
1454                                                   cancels, 1, flags);
1455                 }
1456
1457                 if (res < 0) {
1458                         CERROR("ldlm_cli_cancel_list: %d\n", res);
1459                         res = count;
1460                 }
1461
1462                 count -= res;
1463                 ldlm_lock_list_put(cancels, l_bl_ast, res);
1464         }
1465         LASSERT(list_empty(cancels));
1466         LASSERT(count == 0);
1467         RETURN(0);
1468 }
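/*
 * Editor's note: a minimal sketch tying the two helpers together --
 * collect a resource's unused locks, then let ldlm_cli_cancel_list()
 * batch them into CANCEL RPCs via the @req == NULL path; the wrapper
 * name is illustrative.
 */
static int example_cancel_resource(struct ldlm_resource *res,
                                   ldlm_mode_t mode)
{
        CFS_LIST_HEAD(cancels);
        int count;

        count = ldlm_cancel_resource_local(res, &cancels, NULL, mode,
                                           0, 0, NULL);
        /* one RPC per batch if the export supports cancel sets,
         * otherwise one RPC per lock, as implemented above */
        return ldlm_cli_cancel_list(&cancels, count, NULL, 0, 0);
}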
1469
1470 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1471                                     const struct ldlm_res_id *res_id,
1472                                     ldlm_policy_data_t *policy,
1473                                     ldlm_mode_t mode, int flags, void *opaque)
1474 {
1475         struct ldlm_resource *res;
1476         CFS_LIST_HEAD(cancels);
1477         int count;
1478         int rc;
1479         ENTRY;
1480
1481         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1482         if (res == NULL) {
1483                 /* This is not a problem. */
1484                 CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
1485                 RETURN(0);
1486         }
1487
1488         count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
1489                                            0, flags, opaque);
1490         rc = ldlm_cli_cancel_list(&cancels, count, NULL, 0, flags);
1491         if (rc != ELDLM_OK)
1492                 CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
1493
1494         ldlm_resource_putref(res);
1495         RETURN(0);
1496 }
1497
1498 static inline int have_no_nsresource(struct ldlm_namespace *ns)
1499 {
1500         int no_resource = 0;
1501
1502         spin_lock(&ns->ns_hash_lock);
1503         if (ns->ns_resources == 0)
1504                 no_resource = 1;
1505         spin_unlock(&ns->ns_hash_lock);
1506
1507         RETURN(no_resource);
1508 }
1509
1510 /* Cancel all locks on a namespace (or a specific resource, if given)
1511  * that have 0 readers/writers.
1512  *
1513  * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
1514  * to notify the server. */
1515 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
1516                            const struct ldlm_res_id *res_id,
1517                            int flags, void *opaque)
1518 {
1519         int i;
1520         ENTRY;
1521
1522         if (ns == NULL)
1523                 RETURN(ELDLM_OK);
1524
1525         if (res_id)
1526                 RETURN(ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
1527                                                        LCK_MINMODE, flags,
1528                                                        opaque));
1529
1530         spin_lock(&ns->ns_hash_lock);
1531         for (i = 0; i < RES_HASH_SIZE; i++) {
1532                 struct list_head *tmp;
1533                 tmp = ns->ns_hash[i].next;
1534                 while (tmp != &(ns->ns_hash[i])) {
1535                         struct ldlm_resource *res;
1536                         int rc;
1537
1538                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
1539                         ldlm_resource_getref(res);
1540                         spin_unlock(&ns->ns_hash_lock);
1541
1542                         rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
1543                                                              NULL, LCK_MINMODE,
1544                                                              flags, opaque);
1545
1546                         if (rc)
1547                                 CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
1548                                        res->lr_name.name[0], rc);
1549
1550                         spin_lock(&ns->ns_hash_lock);
1551                         tmp = tmp->next;
1552                         ldlm_resource_putref_locked(res);
1553                 }
1554         }
1555         spin_unlock(&ns->ns_hash_lock);
1556
1557         RETURN(ELDLM_OK);
1558 }
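/*
 * Editor's note: a hedged usage sketch -- throw away every unused lock
 * in a namespace without notifying the server, e.g. once the connection
 * is already gone; the wrapper name is illustrative.
 */
static int example_flush_unused(struct ldlm_namespace *ns)
{
        /* NULL res_id walks the whole namespace hash, and
         * LDLM_FL_LOCAL_ONLY suppresses the CANCEL RPCs */
        return ldlm_cli_cancel_unused(ns, NULL, LDLM_FL_LOCAL_ONLY, NULL);
}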
1559
1560 /* Join/split resource locks to/from the LRU list. */
1561 int ldlm_cli_join_lru(struct ldlm_namespace *ns,
1562                       const struct ldlm_res_id *res_id, int join)
1563 {
1564         struct ldlm_resource *res;
1565         struct ldlm_lock *lock, *n;
1566         int count = 0;
1567         ENTRY;
1568
1569         LASSERT(ns_is_client(ns));
1570
1571         res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
1572         if (res == NULL)
1573                 RETURN(count);
1574         LASSERT(res->lr_type == LDLM_EXTENT);
1575
1576         lock_res(res);
1577         if (!join)
1578                 goto split;
1579
1580         list_for_each_entry_safe(lock, n, &res->lr_granted, l_res_link) {
1581                 if (list_empty(&lock->l_lru) &&
1582                     !lock->l_readers && !lock->l_writers &&
1583                     !(lock->l_flags & LDLM_FL_LOCAL) &&
1584                     !(lock->l_flags & LDLM_FL_CBPENDING)) {
1585                         ldlm_lock_add_to_lru(lock);
1586                         lock->l_flags &= ~LDLM_FL_NO_LRU;
1587                         LDLM_DEBUG(lock, "join lock to lru");
1588                         count++;
1589                 }
1590         }
1591         goto unlock;
1592 split:
1593         spin_lock(&ns->ns_unused_lock);
1594         list_for_each_entry_safe(lock, n, &ns->ns_unused_list, l_lru) {
1595                 if (lock->l_resource == res) {
1596                         ldlm_lock_remove_from_lru_nolock(lock);
1597                         lock->l_flags |= LDLM_FL_NO_LRU;
1598                         LDLM_DEBUG(lock, "split lock from lru");
1599                         count++;
1600                 }
1601         }
1602         spin_unlock(&ns->ns_unused_lock);
1603 unlock:
1604         unlock_res(res);
1605         ldlm_resource_putref(res);
1606         RETURN(count);
1607 }
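/*
 * Editor's note: a hedged sketch of the join/split pairing above --
 * split a resource's extent locks out of the LRU while it is busy and
 * re-join them once it goes idle; the wrapper and its @idle trigger are
 * illustrative assumptions.
 */
static int example_set_lru_membership(struct ldlm_namespace *ns,
                                      const struct ldlm_res_id *res_id,
                                      int idle)
{
        /* join != 0 re-adds eligible locks to the LRU,
         * join == 0 removes the resource's locks from it */
        return ldlm_cli_join_lru(ns, res_id, idle);
}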
1608
1609 /* Lock iterators. */
1610
1611 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
1612                           void *closure)
1613 {
1614         struct list_head *tmp, *next;
1615         struct ldlm_lock *lock;
1616         int rc = LDLM_ITER_CONTINUE;
1617
1618         ENTRY;
1619
1620         if (!res)
1621                 RETURN(LDLM_ITER_CONTINUE);
1622
1623         lock_res(res);
1624         list_for_each_safe(tmp, next, &res->lr_granted) {
1625                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1626
1627                 if (iter(lock, closure) == LDLM_ITER_STOP)
1628                         GOTO(out, rc = LDLM_ITER_STOP);
1629         }
1630
1631         list_for_each_safe(tmp, next, &res->lr_converting) {
1632                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1633
1634                 if (iter(lock, closure) == LDLM_ITER_STOP)
1635                         GOTO(out, rc = LDLM_ITER_STOP);
1636         }
1637
1638         list_for_each_safe(tmp, next, &res->lr_waiting) {
1639                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1640
1641                 if (iter(lock, closure) == LDLM_ITER_STOP)
1642                         GOTO(out, rc = LDLM_ITER_STOP);
1643         }
1644  out:
1645         unlock_res(res);
1646         RETURN(rc);
1647 }
1648
1649 struct iter_helper_data {
1650         ldlm_iterator_t iter;
1651         void *closure;
1652 };
1653
1654 static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
1655 {
1656         struct iter_helper_data *helper = closure;
1657         return helper->iter(lock, helper->closure);
1658 }
1659
1660 static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
1661 {
1662         return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
1663 }
1664
1665 int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
1666                            void *closure)
1667 {
1668         struct iter_helper_data helper = { .iter = iter, .closure = closure };
1669         return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
1670 }
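/*
 * Editor's note: a minimal sketch of the iterator protocol -- callbacks
 * return LDLM_ITER_CONTINUE to keep walking or LDLM_ITER_STOP to end
 * the walk early; the counting callback and its cap are illustrative.
 *
 * Usage: int n = 0; ldlm_namespace_foreach(ns, example_count_cb, &n);
 */
static int example_count_cb(struct ldlm_lock *lock, void *closure)
{
        int *counter = closure;

        if (++(*counter) >= 1000)
                return LDLM_ITER_STOP;   /* seen enough, stop the walk */
        return LDLM_ITER_CONTINUE;
}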
1671
1672 int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
1673                                ldlm_res_iterator_t iter, void *closure)
1674 {
1675         int i, rc = LDLM_ITER_CONTINUE;
1676         struct ldlm_resource *res;
1677         struct list_head *tmp;
1678
1679         ENTRY;
1680         spin_lock(&ns->ns_hash_lock);
1681         for (i = 0; i < RES_HASH_SIZE; i++) {
1682                 tmp = ns->ns_hash[i].next;
1683                 while (tmp != &(ns->ns_hash[i])) {
1684                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
1685                         ldlm_resource_getref(res);
1686                         spin_unlock(&ns->ns_hash_lock);
1687
1688                         rc = iter(res, closure);
1689
1690                         spin_lock(&ns->ns_hash_lock);
1691                         tmp = tmp->next;
1692                         ldlm_resource_putref_locked(res);
1693                         if (rc == LDLM_ITER_STOP)
1694                                 GOTO(out, rc);
1695                 }
1696         }
1697  out:
1698         spin_unlock(&ns->ns_hash_lock);
1699         RETURN(rc);
1700 }
1701
1702 /* Non-blocking function to manipulate a lock whose cb_data is being put away. */
1703 void ldlm_resource_iterate(struct ldlm_namespace *ns,
1704                            const struct ldlm_res_id *res_id,
1705                            ldlm_iterator_t iter, void *data)
1706 {
1707         struct ldlm_resource *res;
1708         ENTRY;
1709
1710         if (ns == NULL) {
1711                 CERROR("must pass in namespace\n");
1712                 LBUG();
1713         }
1714
1715         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1716         if (res == NULL) {
1717                 EXIT;
1718                 return;
1719         }
1720
1721         ldlm_resource_foreach(res, iter, data);
1722         ldlm_resource_putref(res);
1723         EXIT;
1724 }
1725
1726 /* Lock replay */
1727
1728 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
1729 {
1730         struct list_head *list = closure;
1731
1732         /* we use l_pending_chain here, because it's unused on clients. */
1733         LASSERTF(list_empty(&lock->l_pending_chain), "lock %p next %p prev %p\n",
1734                  lock, &lock->l_pending_chain.next, &lock->l_pending_chain.prev);
1735         /* bug 9573: don't replay locks left after eviction */
1736         if (!(lock->l_flags & LDLM_FL_FAILED))
1737                 list_add(&lock->l_pending_chain, list);
1738         return LDLM_ITER_CONTINUE;
1739 }
1740
1741 static int replay_lock_interpret(struct ptlrpc_request *req,
1742                                  struct ldlm_async_args *aa, int rc)
1743 {
1744         struct ldlm_lock *lock;
1745         struct ldlm_reply *reply;
1746
1747         ENTRY;
1748         atomic_dec(&req->rq_import->imp_replay_inflight);
1749         if (rc != ELDLM_OK)
1750                 GOTO(out, rc);
1751
1752
1753         reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF, sizeof(*reply),
1754                                    lustre_swab_ldlm_reply);
1755         if (reply == NULL) {
1756                 CERROR("Can't unpack ldlm_reply\n");
1757                 GOTO(out, rc = -EPROTO);
1758         }
1759
1760         lock = ldlm_handle2lock(&aa->lock_handle);
1761         if (!lock) {
1762                 CERROR("received replay ack for unknown local cookie "LPX64
1763                        " remote cookie "LPX64 " from server %s id %s\n",
1764                        aa->lock_handle.cookie, reply->lock_handle.cookie,
1765                        req->rq_export->exp_client_uuid.uuid,
1766                        libcfs_id2str(req->rq_peer));
1767                 GOTO(out, rc = -ESTALE);
1768         }
1769
1770         lock->l_remote_handle = reply->lock_handle;
1771         LDLM_DEBUG(lock, "replayed lock:");
1772         ptlrpc_import_recovery_state_machine(req->rq_import);
1773         LDLM_LOCK_PUT(lock);
1774 out:
1775         if (rc != ELDLM_OK)
1776                 ptlrpc_connect_import(req->rq_import, NULL);
1777
1778
1779         RETURN(rc);
1780 }
1781
1782 static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1783 {
1784         struct ptlrpc_request *req;
1785         struct ldlm_request *body;
1786         struct ldlm_reply *reply;
1787         struct ldlm_async_args *aa;
1788         int buffers = 2;
1789         int size[3] = { sizeof(struct ptlrpc_body) };
1790         int flags;
1791         ENTRY;
1792
1793
1794         /* Bug 11974: Do not replay a lock which is actively being canceled */
1795         if (lock->l_flags & LDLM_FL_CANCELING) {
1796                 LDLM_DEBUG(lock, "Not replaying canceled lock:");
1797                 RETURN(0);
1798         }
1799
1800         /* If this is a reply-less callback lock, we cannot replay it: the
1801          * server might long since have dropped it, with the notification
1802          * lost in the network (and a conflicting lock already granted). */
1803         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
1804                 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
1805                 ldlm_lock_cancel(lock);
1806                 RETURN(0);
1807         }
1808         /*
1809          * If granted mode matches the requested mode, this lock is granted.
1810          *
1811          * If they differ, but we have a granted mode, then we were granted
1812          * one mode and now want another: ergo, converting.
1813          *
1814          * If we haven't been granted anything and are on a resource list,
1815          * then we're blocked/waiting.
1816          *
1817          * If we haven't been granted anything and we're NOT on a resource list,
1818          * then we haven't got a reply yet and don't have a known disposition.
1819          * This happens whenever a lock enqueue is the request that triggers
1820          * recovery.
1821          */
1822         if (lock->l_granted_mode == lock->l_req_mode)
1823                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
1824         else if (lock->l_granted_mode)
1825                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
1826         else if (!list_empty(&lock->l_res_link))
1827                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
1828         else
1829                 flags = LDLM_FL_REPLAY;
1830
1831         size[DLM_LOCKREQ_OFF] = sizeof(*body);
1832         req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_ENQUEUE, 2, size,
1833                               NULL);
1834         if (!req)
1835                 RETURN(-ENOMEM);
1836
1837         /* We're part of recovery, so don't wait for it. */
1838         req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
1839
1840         body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
1841         ldlm_lock2desc(lock, &body->lock_desc);
1842         body->lock_flags = flags;
1843
1844         ldlm_lock2handle(lock, &body->lock_handle[0]);
1845         size[DLM_LOCKREPLY_OFF] = sizeof(*reply);
1846         if (lock->l_lvb_len != 0) {
1847                 buffers = 3;
1848                 size[DLM_REPLY_REC_OFF] = lock->l_lvb_len;
1849         }
1850         ptlrpc_req_set_repsize(req, buffers, size);
1851         /* Notify the server that we have replayed all requests.
1852          * Also mark the request to be put on a dedicated queue
1853          * to be processed after all request replays.
1854          * Bug 6063. */
1855         lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
1856
1857         LDLM_DEBUG(lock, "replaying lock:");
1858
1859         atomic_inc(&req->rq_import->imp_replay_inflight);
1860         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1861         aa = (struct ldlm_async_args *)&req->rq_async_args;
1862         aa->lock_handle = body->lock_handle[0];
1863         req->rq_interpret_reply = replay_lock_interpret;
1864         ptlrpcd_add_req(req);
1865
1866         RETURN(0);
1867 }
1868
1869 int ldlm_replay_locks(struct obd_import *imp)
1870 {
1871         struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
1872         struct list_head list;
1873         struct ldlm_lock *lock, *next;
1874         int rc = 0;
1875
1876         ENTRY;
1877         CFS_INIT_LIST_HEAD(&list);
1878
1879         LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
1880
1881         /* ensure this doesn't fall to 0 before all have been queued */
1882         atomic_inc(&imp->imp_replay_inflight);
1883
1884         (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
1885
1886         list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
1887                 list_del_init(&lock->l_pending_chain);
1888                 if (rc)
1889                         continue; /* or try to do the rest? */
1890                 rc = replay_one_lock(imp, lock);
1891         }
1892
1893         atomic_dec(&imp->imp_replay_inflight);
1894
1895         RETURN(rc);
1896 }
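/*
 * Editor's note: a hedged sketch of where ldlm_replay_locks() sits --
 * a recovery driver queues the replays, and each reply is completed
 * asynchronously by replay_lock_interpret() above.  The wrapper is
 * illustrative; the real call site is the import state machine.
 */
static int example_start_lock_replay(struct obd_import *imp)
{
        int rc;

        rc = ldlm_replay_locks(imp);
        if (rc != 0)
                CERROR("failed to queue lock replays: %d\n", rc);
        return rc;
}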