lustre/ldlm/ldlm_request.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
#include <signal.h>
#include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_class.h>
#include <obd.h>

#include "ldlm_internal.h"

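/* Interruption handler for the l_wait_event() calls below: the wakeup itself
 * is all that is needed, so there is nothing to do here. */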
static void interrupted_completion_wait(void *data)
{
}

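/* Per-wait context passed to the completion-wait callbacks: the lock being
 * waited upon and the import connection count at enqueue time, so a timeout
 * can tell whether the connection has already cycled. */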
struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32             lwd_conn_cnt;
};

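/* Async enqueue interpret state: the handle of the lock being enqueued. */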
struct ldlm_async_args {
        struct lustre_handle lock_handle;
};

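/* Timeout handler for the completion wait: for a server-side (local) lock it
 * only logs and periodically dumps the namespace; for a client lock it kicks
 * the import into recovery via ptlrpc_fail_import(). */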
int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        ENTRY;
        if (lock->l_conn_export == NULL) {
                static cfs_time_t next_dump = 0, last_dump = 0;

                if (ptlrpc_check_suspend())
                        RETURN(0);

                LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
                           CFS_DURATION_T"s ago); not entering recovery in "
                           "server code, just going back to sleep",
                           lock->l_enqueued_time.tv_sec,
                           cfs_time_current_sec() - lock->l_enqueued_time.tv_sec);
                if (cfs_time_after(cfs_time_current(), next_dump)) {
                        last_dump = next_dump;
                        next_dump = cfs_time_shift(300);
                        ldlm_namespace_dump(D_DLMTRACE,
                                            lock->l_resource->lr_namespace);
                        if (last_dump == 0)
                                libcfs_debug_dumplog();
                }
                RETURN(0);
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
        LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
                  CFS_DURATION_T"s ago), entering recovery for %s@%s",
                  lock->l_enqueued_time.tv_sec,
                  cfs_time_current_sec() - lock->l_enqueued_time.tv_sec,
                  obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);

        RETURN(0);
}

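/* Check under the lock's resource lock whether the wait is over: true once
 * the lock is granted (and no completion AST is still pending) or has failed. */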
static int is_granted_or_cancelled(struct ldlm_lock *lock)
{
        int ret = 0;

        lock_res_and_lock(lock);
        if (((lock->l_req_mode == lock->l_granted_mode) &&
             !(lock->l_flags & LDLM_FL_CP_REQD)) ||
            (lock->l_flags & LDLM_FL_FAILED))
                ret = 1;
        unlock_res_and_lock(lock);

        return ret;
}

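/* Generic client-side completion AST: if the lock came back blocked, sleep
 * (interruptibly, with an obd_timeout expiry unless LDLM_FL_NO_TIMEOUT is
 * set) until the lock is granted or cancelled. */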
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        int rc = 0;
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                goto noreproc;
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(D_OTHER, lock, 0);
        ldlm_reprocess_all(lock->l_resource);

noreproc:

        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        lwd.lwd_lock = lock;

        if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
                LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
                lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
                spin_unlock(&imp->imp_lock);
        }

        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}

/*
 * ->l_blocking_ast() callback for LDLM locks acquired by server-side OBDs.
 */
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag)
{
        int do_ast;
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        lock_res_and_lock(lock);
        /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
         * that ldlm_blocking_ast is called just before intent_policy method
         * takes the ns_lock, then by the time we get the lock, we might not
         * be the correct blocking function anymore.  So check, and return
         * early, if so. */
        if (lock->l_blocking_ast != ldlm_blocking_ast) {
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);
        unlock_res_and_lock(lock);

        if (do_ast) {
                struct lustre_handle lockh;
                int rc;

                LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc < 0)
                        CERROR("ldlm_cli_cancel: %d\n", rc);
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be "
                           "cancelled later");
        }
        RETURN(0);
}

/*
 * ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
 * comment in filter_intent_policy() on why you may need this.
 */
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp)
{
        /*
         * Returning -ELDLM_NO_LOCK_DATA actually works, but the reason for
         * that is rather subtle: with OST-side locking, it may so happen that
         * _all_ extent locks are held by the OST. If client wants to obtain
         * current file size it calls ll{,u}_glimpse_size(), and (as locks are
         * on the server), dummy glimpse callback fires and does
         * nothing. Client still receives correct file size due to the
         * following fragment in filter_intent_policy():
         *
         * rc = l->l_glimpse_ast(l, NULL); // this will update the LVB
         * if (rc != 0 && res->lr_namespace->ns_lvbo &&
         *     res->lr_namespace->ns_lvbo->lvbo_update) {
         *         res->lr_namespace->ns_lvbo->lvbo_update(res, NULL, 0, 1);
         * }
         *
         * that is, after glimpse_ast() fails, filter_lvbo_update() runs, and
         * returns correct file size to the client.
         */
        return -ELDLM_NO_LOCK_DATA;
}

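/* Enqueue a lock entirely within the local (server-side) namespace: create
 * the lock, take a mode reference, and run it through ldlm_lock_enqueue()
 * without ever talking to a remote LDLM server. */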
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, int *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len, void *lvb_swabber,
                           struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        LASSERT(!(*flags & LDLM_FL_REPLAY));
        if (unlikely(ns_is_client(ns))) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, res_id, type, mode, blocking,
                                completion, glimpse, data, lvb_len);
        if (unlikely(!lock))
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_LOCAL;
        if (*flags & LDLM_FL_ATOMIC_CB)
                lock->l_flags |= LDLM_FL_ATOMIC_CB;
        lock->l_lvb_swabber = lvb_swabber;
        unlock_res_and_lock(lock);
        if (policy != NULL)
                lock->l_policy_data = *policy;
        if (type == LDLM_EXTENT)
                lock->l_req_extent = policy->l_extent;

        err = ldlm_lock_enqueue(ns, &lock, policy, flags);
        if (unlikely(err != ELDLM_OK))
                GOTO(out, err);

        if (policy != NULL)
                *policy = lock->l_policy_data;

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}

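/* Undo a failed client-side enqueue: mark the lock LOCAL_ONLY so no CANCEL
 * RPC is sent to the server, then drop the reference and cancel locally. */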
static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock,
                                struct lustre_handle *lockh, int mode)
{
        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
        unlock_res_and_lock(lock);
        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");

        ldlm_lock_decref_and_cancel(lockh, mode);

        /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
         *       from llite/file.c/ll_file_flock(). */
        if (lock->l_resource->lr_type == LDLM_FLOCK) {
                ldlm_lock_destroy(lock);
        }
}

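/* Finish a client-side enqueue once the server reply (or error) is in hand:
 * unpack the reply, apply any flag/mode/resource changes the server made,
 * copy back the LVB, and complete or clean up the lock accordingly. */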
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          int *flags, void *lvb, __u32 lvb_len,
                          void *lvb_swabber, struct lustre_handle *lockh, int rc)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        int is_replay = *flags & LDLM_FL_REPLAY;
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;
        int cleanup_phase = 1;
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        /* ldlm_cli_enqueue is holding a reference on this lock. */
        if (!lock) {
                LASSERT(type == LDLM_FLOCK);
                RETURN(-ENOLCK);
        }

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                if (rc == ELDLM_LOCK_ABORTED) {
                        /* Before we return, swab the reply */
                        reply = req_capsule_server_get(&req->rq_pill,
                                                       &RMF_DLM_REP);
                        if (reply == NULL)
                                rc = -EPROTO;
                        if (lvb_len) {
                                struct ost_lvb *tmplvb;

                                req_capsule_set_size(&req->rq_pill,
                                                     &RMF_DLM_LVB, RCL_SERVER,
                                                     lvb_len);
                                tmplvb = req_capsule_server_swab_get(
                                                        &req->rq_pill,
                                                        &RMF_DLM_LVB,
                                                        lvb_swabber);
                                if (tmplvb == NULL)
                                        GOTO(cleanup, rc = -EPROTO);
                                if (lvb != NULL)
                                        memcpy(lvb, tmplvb, lvb_len);
                        }
                }
                GOTO(cleanup, rc);
        }

        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        if (reply == NULL)
                GOTO(cleanup, rc = -EPROTO);

        /* lock enqueued on the server */
        cleanup_phase = 0;

        lock_res_and_lock(lock);
        lock->l_remote_handle = reply->lock_handle;
        *flags = reply->lock_flags;
        lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
        /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
         * to wait with no timeout as well */
        lock->l_flags |= reply->lock_flags & LDLM_FL_NO_TIMEOUT;
        unlock_res_and_lock(lock);

        CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
               lock, reply->lock_handle.cookie, *flags);

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;
                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (memcmp(reply->lock_desc.l_resource.lr_name.name,
                           lock->l_resource->lr_name.name,
                           sizeof(struct ldlm_res_id))) {
                        CDEBUG(D_INFO, "remote intent success, locking "
                                        "(%ld,%ld,%ld) instead of "
                                        "(%ld,%ld,%ld)\n",
                               (long)reply->lock_desc.l_resource.lr_name.name[0],
                               (long)reply->lock_desc.l_resource.lr_name.name[1],
                               (long)reply->lock_desc.l_resource.lr_name.name[2],
                               (long)lock->l_resource->lr_name.name[0],
                               (long)lock->l_resource->lr_name.name[1],
                               (long)lock->l_resource->lr_name.name[2]);

                        rc = ldlm_lock_change_resource(ns, lock,
                                        &reply->lock_desc.l_resource.lr_name);
                        if (rc || lock->l_resource == NULL)
                                GOTO(cleanup, rc = -ENOMEM);
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
                if (with_policy)
                        if (!(type == LDLM_IBITS && !(exp->exp_connect_flags &
                                                      OBD_CONNECT_IBITS)))
                                lock->l_policy_data =
                                                 reply->lock_desc.l_policy_data;
                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock, "client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT ||
            /* Cancel extent locks as soon as possible on a liblustre client,
             * because it cannot handle asynchronous ASTs robustly (see
             * bug 7311). */
            (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /* If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one. */
        if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
                void *tmplvb;

                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                                     lvb_len);
                tmplvb = req_capsule_server_swab_get(&req->rq_pill,
                                                     &RMF_DLM_LVB,
                                                     lvb_swabber);
                if (tmplvb == NULL)
                        GOTO(cleanup, rc = -EPROTO);
                memcpy(lock->l_lvb_data, tmplvb, lvb_len);
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);
                        if (!rc)
                                rc = err;
                        if (rc && type != LDLM_FLOCK) /* bug 9425, bug 10250 */
                                cleanup_phase = 1;
                }
        }

        if (lvb_len && lvb != NULL) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
cleanup:
        if (cleanup_phase == 1 && rc)
                failed_lock_cleanup(ns, lock, lockh, mode);
        /* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
        LDLM_LOCK_PUT(lock);
        LDLM_LOCK_PUT(lock);
        return rc;
}


/* PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
 * a single page on the send/receive side. XXX: 512 should be changed
 * to a more adequate value. */
static inline int ldlm_req_handles_avail(int req_size, int off)
{
        int avail;

        avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
        avail /= sizeof(struct lustre_handle);
        avail += LDLM_LOCKREQ_HANDLES - off;

        return avail;
}

static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
                                             enum req_location loc,
                                             int off)
{
        int size = req_capsule_msg_size(pill, loc);
        return ldlm_req_handles_avail(size, off);
}

static inline int ldlm_format_handles_avail(struct obd_import *imp,
                                            const struct req_format *fmt,
                                            enum req_location loc, int off)
{
        int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
        return ldlm_req_handles_avail(size, off);
}

/* Cancel LRU locks and pack them into the enqueue request. Also pack the
 * given @count locks from @cancels. */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      struct list_head *cancels, int count)
{
        struct ldlm_namespace   *ns = exp->exp_obd->obd_namespace;
        struct req_capsule      *pill = &req->rq_pill;
        struct ldlm_request     *dlm = NULL;
        int flags, avail, to_free, bufcount, pack = 0;
        CFS_LIST_HEAD(head);
        int rc;
        ENTRY;

        if (cancels == NULL)
                cancels = &head;
        if (exp_connect_cancelset(exp)) {
                /* Estimate the amount of available space in the request. */
                bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
                avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);

                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                to_free = !ns_connect_lru_resize(ns) &&
                          opc == LDLM_ENQUEUE ? 1 : 0;

                /* Cancel LRU locks here _only_ if the server supports
                 * EARLY_CANCEL. Otherwise we have to send an extra CANCEL
                 * RPC, which would make us slower. */
                if (avail > count)
                        count += ldlm_cancel_lru_local(ns, cancels, to_free,
                                                       avail - count, 0, flags);
                if (avail > count)
                        pack = count;
                else
                        pack = avail;
                req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(pack, opc));
        }

        rc = ptlrpc_request_pack(req, version, opc);
        if (rc) {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
                RETURN(rc);
        }

        if (exp_connect_cancelset(exp)) {
                if (canceloff) {
                        dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
                        LASSERT(dlm);
                        /* Skip the first lock handle in ldlm_request_pack();
                         * that method will increment @lock_count according
                         * to the number of lock handles actually written to
                         * the buffer. */
                        dlm->lock_count = canceloff;
                }
                /* Pack @pack lock handles into the request. */
                ldlm_cli_cancel_list(cancels, pack, req, 0);
                /* Prepare and send a separate cancel RPC for the others. */
                ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
        } else {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
        }
        RETURN(0);
}

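/* Convenience wrapper around ldlm_prep_elc_req() for the common LDLM_ENQUEUE
 * case, using the standard cancel offset for enqueue requests. */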
int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
                          struct list_head *cancels, int count)
{
        return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                 LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}

/* If a request has some specific initialisation it is passed in @reqp,
 * otherwise it is created in ldlm_cli_enqueue.
 *
 * Supports sync and async requests; pass the @async flag accordingly. If the
 * request was created in ldlm_cli_enqueue and it is async, it is passed back
 * to the caller in @reqp. */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t *policy, int *flags,
                     void *lvb, __u32 lvb_len, void *lvb_swabber,
                     struct lustre_handle *lockh, int async)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_lock      *lock;
        struct ldlm_request   *body;
        int                    is_replay = *flags & LDLM_FL_REPLAY;
        int                    req_passed_in = 1;
        int                    rc, err;
        struct ptlrpc_request *req;
        ENTRY;

        LASSERT(exp != NULL);

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything set up nicely. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LASSERT(lock != NULL);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                        einfo->ei_mode, einfo->ei_cb_bl,
                                        einfo->ei_cb_cp, einfo->ei_cb_gl,
                                        einfo->ei_cbdata, lvb_len);
                if (lock == NULL)
                        RETURN(-ENOMEM);
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, einfo->ei_mode);
                ldlm_lock2handle(lock, lockh);
                lock->l_lvb_swabber = lvb_swabber;
                if (policy != NULL) {
                        /* INODEBITS_INTEROP: If the server does not support
                         * inodebits, we will request a plain lock in the
                         * descriptor (ldlm_lock2desc() below) but use an
                         * inodebits lock internally with both bits set.
                         */
                        if (einfo->ei_type == LDLM_IBITS &&
                            !(exp->exp_connect_flags & OBD_CONNECT_IBITS))
                                lock->l_policy_data.l_inodebits.bits =
                                        MDS_INODELOCK_LOOKUP |
                                        MDS_INODELOCK_UPDATE;
                        else
                                lock->l_policy_data = *policy;
                }

                if (einfo->ei_type == LDLM_EXTENT)
                        lock->l_req_extent = policy->l_extent;
                LDLM_DEBUG(lock, "client-side enqueue START");
        }

        /* lock not sent to server yet */

        if (reqp == NULL || *reqp == NULL) {
                req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                                &RQF_LDLM_ENQUEUE,
                                                LUSTRE_DLM_VERSION,
                                                LDLM_ENQUEUE);
                if (req == NULL) {
                        failed_lock_cleanup(ns, lock, lockh, einfo->ei_mode);
                        LDLM_LOCK_PUT(lock);
                        RETURN(-ENOMEM);
                }
                req_passed_in = 0;
                if (reqp)
                        *reqp = req;
        } else {
                int len;

                req = *reqp;
                len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
                                           RCL_CLIENT);
                LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
                         DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
        }

        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = einfo->ei_cb_bl;

        /* Dump lock data into the request buffer */
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;
        body->lock_handle[0] = *lockh;

        /* Continue as normal. */
        if (!req_passed_in) {
                if (lvb_len > 0) {
                        req_capsule_extend(&req->rq_pill,
                                           &RQF_LDLM_ENQUEUE_LVB);
                        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
                                             RCL_SERVER, lvb_len);
                }
                ptlrpc_request_set_replen(req);
        }

        /*
         * Liblustre client doesn't get extent locks, except for O_APPEND case
         * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
         * [i_size, OBD_OBJECT_EOF] lock is taken.
         */
        LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
                     policy->l_extent.end == OBD_OBJECT_EOF));

        if (async) {
                LASSERT(reqp != NULL);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);
        err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
                                    einfo->ei_mode, flags, lvb, lvb_len,
                                    lvb_swabber, lockh, rc);

        /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
         * the one reference that we took. */
        if (err == -ENOLCK)
                LDLM_LOCK_PUT(lock);
        else
                rc = err;

        if (!req_passed_in && req != NULL) {
                ptlrpc_req_finished(req);
                if (reqp)
                        *reqp = NULL;
        }

        RETURN(rc);
}

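/* Convert the mode of a lock that lives only in the local namespace (no
 * connection export), reprocessing the resource if the conversion succeeds. */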
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  __u32 *flags)
{
        struct ldlm_resource *res;
        int rc;
        ENTRY;
        if (ns_is_client(lock->l_resource->lr_namespace)) {
                CERROR("Trying to convert local lock\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        res = ldlm_lock_convert(lock, new_mode, flags);
        if (res) {
                ldlm_reprocess_all(res);
                rc = 0;
        } else {
                rc = EDEADLOCK;
        }
        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(rc);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
/* Caller of this code is supposed to take care of lock readers/writers
   accounting */
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
{
        struct ldlm_request   *body;
        struct ldlm_reply     *reply;
        struct ldlm_lock      *lock;
        struct ldlm_resource  *res;
        struct ptlrpc_request *req;
        int                    rc;
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;

        if (lock->l_conn_export == NULL)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(lock->l_conn_export),
                                        &RQF_LDLM_CONVERT, LUSTRE_DLM_VERSION,
                                        LDLM_CONVERT);
        if (req == NULL) {
                LDLM_LOCK_PUT(lock);
                RETURN(-ENOMEM);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        ptlrpc_request_set_replen(req);
        rc = ptlrpc_queue_wait(req);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        if (reply == NULL)
                GOTO(out, rc = -EPROTO);

        if (req->rq_status)
                GOTO(out, rc = req->rq_status);

        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL) {
                ldlm_reprocess_all(res);
                /* Go to sleep until the lock is granted. */
                /* FIXME: or cancelled. */
                if (lock->l_completion_ast) {
                        rc = lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
                                                    NULL);
                        if (rc)
                                GOTO(out, rc);
                }
        } else {
                rc = EDEADLOCK;
        }
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}

/* Cancel locks locally.
 * Returns:
 * LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server;
 * LDLM_FL_CANCELING otherwise;
 * LDLM_FL_BL_AST if a separate CANCEL RPC is needed. */
static int ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
        int rc = LDLM_FL_LOCAL_ONLY;
        ENTRY;

        if (lock->l_conn_export) {
                int local_only;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references*/
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = (lock->l_flags &
                              (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
                ldlm_cancel_callback(lock);
                rc = (lock->l_flags & LDLM_FL_BL_AST) ?
                        LDLM_FL_BL_AST : LDLM_FL_CANCELING;
                unlock_res_and_lock(lock);

                if (local_only) {
                        CDEBUG(D_DLMTRACE, "not sending request (at caller's "
                               "instruction)\n");
                        rc = LDLM_FL_LOCAL_ONLY;
                }
                ldlm_lock_cancel(lock);
        } else {
                if (ns_is_client(lock->l_resource->lr_namespace)) {
                        LDLM_ERROR(lock, "Trying to cancel local lock");
                        LBUG();
                }
                LDLM_DEBUG(lock, "server-side local cancel");
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side local cancel handler END");
        }

        RETURN(rc);
}

/* Pack @count locks in @head into the ldlm_request buffer of request @req. */
static void ldlm_cancel_pack(struct ptlrpc_request *req,
                             struct list_head *head, int count)
{
        struct ldlm_request *dlm;
        struct ldlm_lock *lock;
        int max, packed = 0;
        ENTRY;

        dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        LASSERT(dlm != NULL);

        /* Check the room in the request buffer. */
        max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
                sizeof(struct ldlm_request);
        max /= sizeof(struct lustre_handle);
        max += LDLM_LOCKREQ_HANDLES;
        LASSERT(max >= dlm->lock_count + count);

        /* XXX: it would be better to pack lock handles grouped by resource,
         * so that the server-side cancel would call filter_lvbo_update() less
         * frequently. */
        list_for_each_entry(lock, head, l_bl_ast) {
                if (!count--)
                        break;
                LASSERT(lock->l_conn_export);
                /* Pack the lock handle into the given request buffer. */
                LDLM_DEBUG(lock, "packing");
                dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
                packed++;
        }
        CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
        EXIT;
}

/* Prepare and send a batched cancel RPC; it will include @count lock handles
 * of locks given in @cancels. */
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
                        int count, int flags)
{
        struct ptlrpc_request *req = NULL;
        struct obd_import *imp;
        int free, sent = 0;
        int rc = 0;
        ENTRY;

        LASSERT(exp != NULL);
        LASSERT(count > 0);

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
                RETURN(count);

        free = ldlm_format_handles_avail(class_exp2cliimp(exp),
                                         &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
        if (count > free)
                count = free;

        while (1) {
                int bufcount;

                imp = class_exp2cliimp(exp);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_DLMTRACE,
                               "skipping cancel on invalid import %p\n", imp);
                        RETURN(count);
                }

                req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
                if (req == NULL)
                        GOTO(out, rc = -ENOMEM);

                bufcount = req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(count, LDLM_CANCEL));

                rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
                if (rc) {
                        ptlrpc_request_free(req);
                        GOTO(out, rc);
                }
                req->rq_no_resend = 1;
                req->rq_no_delay = 1;

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                ldlm_cancel_pack(req, cancels, count);

                ptlrpc_request_set_replen(req);
                if (flags & LDLM_FL_ASYNC) {
                        ptlrpcd_add_req(req);
                        sent = count;
                        GOTO(out, 0);
                } else {
                        rc = ptlrpc_queue_wait(req);
                }
                if (rc == ESTALE) {
                        CDEBUG(D_DLMTRACE, "client/server (nid %s) "
                               "out of sync -- not fatal\n",
                               libcfs_nid2str(req->rq_import->
                                              imp_connection->c_peer.nid));
                        rc = 0;
                } else if (rc == -ETIMEDOUT && /* check there was no reconnect*/
                           req->rq_import_generation == imp->imp_generation) {
                        ptlrpc_req_finished(req);
                        continue;
                } else if (rc != ELDLM_OK) {
                        CERROR("Got rc %d from cancel RPC: canceling "
                               "anyway\n", rc);
                        break;
                }
                sent = count;
                break;
        }

        ptlrpc_req_finished(req);
        EXIT;
out:
        return sent ? sent : rc;
}

static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
        LASSERT(imp != NULL);
        return &imp->imp_obd->obd_namespace->ns_pool;
}

/**
 * Update client's obd pool related fields with new SLV and Limit from \a req.
 */
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
        struct obd_device *obd;
        __u64 old_slv, new_slv;
        __u32 new_limit;
        ENTRY;

        if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
                     !imp_connect_lru_resize(req->rq_import)))
        {
                /*
                 * Do nothing for corner cases.
                 */
                RETURN(0);
        }

        /*
         * In some cases an RPC may contain SLV and limit zeroed out. This is
         * the case when the server does not support the LRU resize feature.
         * This is also possible in some recovery cases when server-side reqs
         * have no ref to the obd export and thus access to the server-side
         * namespace is not possible.
         */
        if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
            lustre_msg_get_limit(req->rq_repmsg) == 0) {
                DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
                          "(SLV: "LPU64", Limit: %u)",
                          lustre_msg_get_slv(req->rq_repmsg),
                          lustre_msg_get_limit(req->rq_repmsg));
                RETURN(0);
        }

        new_limit = lustre_msg_get_limit(req->rq_repmsg);
        new_slv = lustre_msg_get_slv(req->rq_repmsg);
        obd = req->rq_import->imp_obd;

        /*
         * Set new SLV and Limit in obd fields to make them accessible to the
         * pool thread. We do not access obd_namespace and pool directly here
         * as there is no reliable way to make sure that they are still
         * alive at cleanup time. Evil races are possible which may cause an
         * oops at that time.
         */
        write_lock(&obd->obd_pool_lock);
        old_slv = obd->obd_pool_slv;
        obd->obd_pool_slv = new_slv;
        obd->obd_pool_limit = new_limit;
        write_unlock(&obd->obd_pool_lock);

        /*
         * Check if we need to wake up the pools thread for a fast SLV change.
         * This is only done when the thread period is noticeably long, like
         * 10s or more.
         */
#if defined(__KERNEL__) && (LDLM_POOLS_THREAD_PERIOD >= 10)
        if (old_slv > 0) {
                __u64 fast_change = old_slv * LDLM_POOLS_FAST_SLV_CHANGE;
                do_div(fast_change, 100);

                /*
                 * Wake up the pools thread only if SLV has changed more than
                 * 50% since the last update. In this case we want to react
                 * asap. Otherwise there is no sense in waking up the pools,
                 * as they are re-calculated every LDLM_POOLS_THREAD_PERIOD
                 * anyway.
                 */
                if (old_slv > new_slv && old_slv - new_slv > fast_change)
                        ldlm_pools_wakeup();
        }
#endif
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_update_pool);

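/* Client-side cancel entry point: cancel the lock locally first; if a CANCEL
 * RPC is still required, opportunistically batch additional unused LRU locks
 * into the same RPC when the server supports cancel sets. */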
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct obd_export *exp;
        int avail, flags, count = 1, rc = 0;
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* concurrent cancels on the same handle can happen */
        lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
        if (lock == NULL) {
                LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
                RETURN(0);
        }

        rc = ldlm_cli_cancel_local(lock);
        if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
                LDLM_LOCK_PUT(lock);
                RETURN(rc < 0 ? rc : 0);
        }
        /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
         * rpc which goes to canceld portal, so we can cancel other lru locks
         * here and send them all as one LDLM_CANCEL rpc. */
        LASSERT(list_empty(&lock->l_bl_ast));
        list_add(&lock->l_bl_ast, &cancels);

        exp = lock->l_conn_export;
        if (exp_connect_cancelset(exp)) {
                avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
                                                  &RQF_LDLM_CANCEL,
                                                  RCL_CLIENT, 0);
                LASSERT(avail > 0);

                ns = lock->l_resource->lr_namespace;
                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
                                               LDLM_FL_BL_AST, flags);
        }
        ldlm_cli_cancel_list(&cancels, count, NULL, 0);
        RETURN(0);
}

1122
1123 /* XXX until we will have compound requests and can cut cancels from generic rpc
1124  * we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
1125 static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
1126 {
1127         CFS_LIST_HEAD(head);
1128         struct ldlm_lock *lock, *next;
1129         int left = 0, bl_ast = 0, rc;
1130
1131         left = count;
1132         list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
1133                 if (left-- == 0)
1134                         break;
1135
1136                 if (flags & LDLM_FL_LOCAL_ONLY) {
1137                         rc = LDLM_FL_LOCAL_ONLY;
1138                         ldlm_lock_cancel(lock);
1139                 } else {
1140                         rc = ldlm_cli_cancel_local(lock);
1141                 }
1142                 if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
1143                         LDLM_DEBUG(lock, "Cancel lock separately");
1144                         list_del_init(&lock->l_bl_ast);
1145                         list_add(&lock->l_bl_ast, &head);
1146                         bl_ast ++;
1147                         continue;
1148                 }
1149                 if (rc == LDLM_FL_LOCAL_ONLY) {
1150                         /* CANCEL RPC should not be sent to server. */
1151                         list_del_init(&lock->l_bl_ast);
1152                         LDLM_LOCK_PUT(lock);
1153                         count--;
1154                 }
1155
1156         }
1157         if (bl_ast > 0) {
1158                 count -= bl_ast;
1159                 ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
1160         }
1161
1162         RETURN(count);
1163 }
/* Return LDLM_POLICY_KEEP_LOCK to stop LRU processing and keep the current
 * lock cached; return LDLM_POLICY_CANCEL_LOCK otherwise. */
static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
                                                   struct ldlm_lock *lock,
                                                   int unused, int added,
                                                   int count)
{
        int lock_cost;
        __u64 page_nr;

        /* Stop LRU processing when we have reached the passed @count or have
         * checked all locks in the LRU. */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        if (lock->l_resource->lr_type == LDLM_EXTENT) {
                struct ldlm_extent *l_extent;

                /* For all extent locks the cost is 1 + the number of pages in
                 * their extent. */
                l_extent = &lock->l_policy_data.l_extent;
                page_nr = (l_extent->end - l_extent->start);
                do_div(page_nr, CFS_PAGE_SIZE);

#ifdef __KERNEL__
                /* XXX: In fact this is an evil hack, we can't access the
                 * inode here. To do this right we need some way to know the
                 * number of pages covered by the lock. This should be fixed
                 * later when 10718 is landed. */
                if (lock->l_ast_data != NULL) {
                        struct inode *inode = lock->l_ast_data;
                        if (page_nr > inode->i_mapping->nrpages)
                                page_nr = inode->i_mapping->nrpages;
                }
#endif
                lock_cost = 1 + page_nr;
        } else {
                /* For all locks which are not extent ones the cost is 1 */
                lock_cost = 1;
        }

        /* Keep all expensive locks in the LRU for the memory-pressure cancel
         * policy. They may anyway be canceled by the LRU resize policy if
         * their CLV is not small enough. */
        return lock_cost > ns->ns_shrink_thumb ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return LDLM_POLICY_KEEP_LOCK to stop LRU processing and keep the current
 * lock cached; return LDLM_POLICY_CANCEL_LOCK otherwise. */
static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
                                                 struct ldlm_lock *lock,
                                                 int unused, int added,
                                                 int count)
{
        cfs_time_t cur = cfs_time_current();
        struct ldlm_pool *pl = &ns->ns_pool;
        __u64 slv, lvf, lv;
        cfs_time_t la;

        /* Stop LRU processing when we have reached the passed @count or have
         * checked all locks in the LRU. */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        slv = ldlm_pool_get_slv(pl);
        lvf = ldlm_pool_get_lvf(pl);
        la = cfs_duration_sec(cfs_time_sub(cur,
                              lock->l_last_used));

        /* Stop when the SLV has not yet arrived from the server or the lock
         * value (LV) is smaller than it. */
        lv = lvf * la * unused;

        /* Inform the pool about the current CLV so it is visible via proc. */
        ldlm_pool_set_clv(pl, lv);
        return (slv == 1 || lv < slv) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return LDLM_POLICY_KEEP_LOCK to stop LRU processing and keep the current
 * lock cached; return LDLM_POLICY_CANCEL_LOCK otherwise. */
static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
                                                   struct ldlm_lock *lock,
                                                   int unused, int added,
                                                   int count)
{
        /* Stop LRU processing when we have reached the passed @count or have
         * checked all locks in the LRU. */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return LDLM_POLICY_KEEP_LOCK to stop LRU processing and keep the current
 * lock cached; return LDLM_POLICY_CANCEL_LOCK otherwise. */
static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
                                                 struct ldlm_lock *lock,
                                                 int unused, int added,
                                                 int count)
{
        /* Stop LRU processing if a young lock is found and we have reached
         * the passed @count. */
        return ((added >= count) &&
                cfs_time_before(cfs_time_current(),
                                cfs_time_add(lock->l_last_used,
                                             ns->ns_max_age))) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/* Return LDLM_POLICY_KEEP_LOCK to stop LRU processing and keep the current
 * lock cached; return LDLM_POLICY_CANCEL_LOCK otherwise. */
static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
                                                    struct ldlm_lock *lock,
                                                    int unused, int added,
                                                    int count)
{
        /* Stop LRU processing when we have reached the passed @count or have
         * checked all locks in the LRU. */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
                                                      struct ldlm_lock *, int,
                                                      int, int);

static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
        if (ns_connect_lru_resize(ns)) {
                if (flags & LDLM_CANCEL_SHRINK)
                        return ldlm_cancel_shrink_policy;
                else if (flags & LDLM_CANCEL_LRUR)
                        return ldlm_cancel_lrur_policy;
                else if (flags & LDLM_CANCEL_PASSED)
                        return ldlm_cancel_passed_policy;
        } else {
                if (flags & LDLM_CANCEL_AGED)
                        return ldlm_cancel_aged_policy;
        }

        return ldlm_cancel_default_policy;
}
1308  
1309 /* - Free space in the lru for @count new locks,
1310  *   redundant unused locks are cancelled locally;
1311  * - also cancel locally unused aged locks;
1312  * - do not cancel more than @max locks;
1313  * - GET the found locks and add them into the @cancels list.
1314  *
1315  * A client lock can be added to the l_bl_ast list only when it is
1316  * marked LDLM_FL_CANCELING; otherwise, somebody is already doing CANCEL.
1317  * The following callers check and set this flag properly:
1318  * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
1319  * ldlm_cli_cancel(). As any attempt to cancel a lock relies on this
1320  * flag, the l_bl_ast list is later accessed without any special locking.
1321  *
1322  * Calling policies for enabled lru resize:
1323  * ----------------------------------------
1324  * flags & LDLM_CANCEL_LRUR - use lru resize policy (SLV from server) to
1325  *                            cancel not more than @count locks;
1326  *
1327  * flags & LDLM_CANCEL_PASSED - cancel @count old locks (located at
1328  *                              the beginning of the lru list);
1329  *
1330  * flags & LDLM_CANCEL_SHRINK - cancel not more than @count locks according to
1331  *                              the memory pressure policy function;
1332  *
1333  * flags & LDLM_CANCEL_AGED -   cancel locks according to the "aged policy".
1334  */
1335 int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
1336                           int count, int max, int cancel_flags, int flags)
1337 {
1338         ldlm_cancel_lru_policy_t pf;
1339         struct ldlm_lock *lock, *next;
1340         int added = 0, unused;
1341         ENTRY;
1342
1343         spin_lock(&ns->ns_unused_lock);
1344         unused = ns->ns_nr_unused;
1345
1346         if (!ns_connect_lru_resize(ns))
1347                 count += unused - ns->ns_max_unused;
1348
1349         pf = ldlm_cancel_lru_policy(ns, flags);
1350         LASSERT(pf != NULL);
1351         
1352         while (!list_empty(&ns->ns_unused_list)) {
1353                 /* For any flags, stop scanning if @max is reached. */
1354                 if (max && added >= max)
1355                         break;
1356
1357                 list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
1358                         /* No locks which got blocking requests. */
1359                         LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
1360
1361                         /* Somebody is already doing CANCEL; no need to keep
1362                          * this lock in the lru, do not traverse it again. */
1363                         if (!(lock->l_flags & LDLM_FL_CANCELING))
1364                                 break;
1365
1366                         ldlm_lock_remove_from_lru_nolock(lock);
1367                 }
1368                 if (&lock->l_lru == &ns->ns_unused_list)
1369                         break;
1370
1371                 /* Pass the lock through the policy filter and see if it
1372                  * should stay in the lru.
1373                  *
1374                  * Even for the shrinker policy we stop scanning if
1375                  * we find a lock that should stay in the cache.
1376                  * We should take lock age into account anyway,
1377                  * as a new lock is a valuable resource even if
1378                  * its weight is small.
1379                  *
1380                  * That is, for the shrinker policy we drop only
1381                  * old locks, but additionally choose them by
1382                  * their weight. Big extent locks will stay in
1383                  * the cache. */
1384                 if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
1385                         break;
1386
1387                 LDLM_LOCK_GET(lock); /* dropped by bl thread */
1388                 spin_unlock(&ns->ns_unused_lock);
1389
1390                 lock_res_and_lock(lock);
1391                 /* Check flags again under the lock. */
1392                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
1393                     (ldlm_lock_remove_from_lru(lock) == 0)) {
1394                         /* another thread is removing the lock from the lru,
1395                          * or somebody is already doing CANCEL, or
1396                          * there is a blocking request which will send
1397                          * the cancel by itself, or the lock has been
1398                          * matched and is no longer unused. */
1399                         unlock_res_and_lock(lock);
1400                         LDLM_LOCK_PUT(lock);
1401                         spin_lock(&ns->ns_unused_lock);
1402                         continue;
1403                 }
1404                 LASSERT(!lock->l_readers && !lock->l_writers);
1405
1406                 /* If we have chosen to cancel this lock voluntarily, we
1407                  * had better send a cancel notification to the server, so
1408                  * that it frees the appropriate state. This might lead to
1409                  * a race where the server silently cancels this lock
1410                  * while we are cancelling it here. */
1411                 lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
1412
1413                 /* Setting the CBPENDING flag is a little misleading,
1414                  * but prevents an important race; namely, once
1415                  * CBPENDING is set, the lock can accumulate no more
1416                  * readers/writers. Since readers and writers are
1417                  * already zero here, ldlm_lock_decref() won't see
1418                  * this flag and call l_blocking_ast */
1419                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
1420
1421                 /* We can't re-add to l_lru as it confuses the
1422                  * refcounting in ldlm_lock_remove_from_lru() if an AST
1423                  * arrives after we drop ns_lock below. We use l_bl_ast
1424                  * and can't use l_pending_chain, as the latter is used on
1425                  * both server and client, even though bug 5666 says it is
1426                  * used only on the server. */
1427                 LASSERT(list_empty(&lock->l_bl_ast));
1428                 list_add(&lock->l_bl_ast, cancels);
1429                 unlock_res_and_lock(lock);
1430                 spin_lock(&ns->ns_unused_lock);
1431                 added++;
1432                 unused--;
1433         }
1434         spin_unlock(&ns->ns_unused_lock);
1435         RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
1436 }
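/* Usage sketch (hypothetical caller, not part of this file), mirroring the
 * pattern used by ldlm_cancel_lru() below: collect up to 64 unused locks
 * with the passed-count policy, then ship the CANCEL RPCs:
 *
 *      CFS_LIST_HEAD(cancels);
 *      int n = ldlm_cancel_lru_local(ns, &cancels, 64, 0, 0,
 *                                    LDLM_CANCEL_PASSED);
 *      ldlm_cli_cancel_list(&cancels, n, NULL, 0);
 *
 * The locks are GET'd onto @cancels via l_bl_ast and released once the
 * cancels are processed. */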
1437
1438 /* Returns the number of locks which could be cancelled the next time
1439  * ldlm_cancel_lru() is called. Used by the locks pool shrinker. */
1440 int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
1441                              int count, int max, int flags)
1442 {
1443         ldlm_cancel_lru_policy_t pf;
1444         struct ldlm_lock *lock;
1445         int added = 0, unused;
1446         ENTRY;
1447
1448         pf = ldlm_cancel_lru_policy(ns, flags);
1449         LASSERT(pf != NULL);
1450         spin_lock(&ns->ns_unused_lock);
1451         unused = ns->ns_nr_unused;
1452
1453         list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
1454                 /* For any flags, stop scanning if @max is reached. */
1455                 if (max && added >= max)
1456                         break;
1457
1458                 /* Somebody is already doing CANCEL, or there is a
1459                  * blocking request which will send the cancel. Let's
1460                  * not count this lock. */
1461                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
1462                     (lock->l_flags & LDLM_FL_BL_AST)) 
1463                         continue;
1464
1465                 /* Pass the lock through the policy filter and see if it
1466                  * should stay in lru. */
1467                 if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
1468                         break;
1469
1470                 added++;
1471                 unused--;
1472         }
1473         spin_unlock(&ns->ns_unused_lock);
1474         RETURN(added);
1475 }
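/* Note: unlike ldlm_cancel_lru_local() above, this function only walks the
 * list under ns_unused_lock and counts candidates; it neither removes locks
 * from the lru nor sets LDLM_FL_CANCELING, which keeps the shrinker's
 * "how much could we free" pass cheap. */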
1476
1477 /* When called with LDLM_ASYNC, the blocking callback will be handled
1478  * in a thread, and this function will return after the thread has been
1479  * asked to call the callback.  When called with LDLM_SYNC, the blocking
1480  * callback will be performed in this function. */
1481 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync, 
1482                     int flags)
1483 {
1484         CFS_LIST_HEAD(cancels);
1485         int count, rc;
1486         ENTRY;
1487
1488 #ifndef __KERNEL__
1489         sync = LDLM_SYNC; /* force to be sync in user space */
1490 #endif
1491         count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
1492         if (sync == LDLM_ASYNC) {
1493                 rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count);
1494                 if (rc == 0)
1495                         RETURN(count);
1496         }
1497
1498         /* If an error occurred in ASYNC mode, or
1499          * this is SYNC mode, cancel the list. */
1500         ldlm_cli_cancel_list(&cancels, count, NULL, 0);
1501         RETURN(count);
1502 }
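/* Usage sketch (hypothetical values): a caller under memory pressure that
 * wants roughly 128 locks dropped without blocking would call
 *
 *      ldlm_cancel_lru(ns, 128, LDLM_ASYNC, 0);
 *
 * leaving the CANCEL RPCs to the blocking thread; with LDLM_SYNC the RPCs
 * are sent before this call returns. */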
1503
1504 /* Find and cancel locally unused locks found on the resource, matched to
1505  * the given policy and mode. GET the found locks and add them into the
1506  * @cancels list. */
1507 int ldlm_cancel_resource_local(struct ldlm_resource *res,
1508                                struct list_head *cancels,
1509                                ldlm_policy_data_t *policy,
1510                                ldlm_mode_t mode, int lock_flags,
1511                                int cancel_flags, void *opaque)
1512 {
1513         struct ldlm_lock *lock;
1514         int count = 0;
1515         ENTRY;
1516
1517         lock_res(res);
1518         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
1519                 if (opaque != NULL && lock->l_ast_data != opaque) {
1520                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
1521                                    lock->l_ast_data, opaque);
1522                         //LBUG();
1523                         continue;
1524                 }
1525
1526                 if (lock->l_readers || lock->l_writers) {
1527                         if (cancel_flags & LDLM_FL_WARN) {
1528                                 LDLM_ERROR(lock, "lock in use");
1529                                 //LBUG();
1530                         }
1531                         continue;
1532                 }
1533
1534                 /* If somebody is already doing CANCEL, or a blocking AST
1535                  * has come, skip this lock. */
1536                 if (lock->l_flags & LDLM_FL_BL_AST || 
1537                     lock->l_flags & LDLM_FL_CANCELING)
1538                         continue;
1539
1540                 if (lockmode_compat(lock->l_granted_mode, mode))
1541                         continue;
1542
1543                 /* If policy is given and this is IBITS lock, add to list only
1544                  * those locks that match by policy. */
1545                 if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
1546                     !(lock->l_policy_data.l_inodebits.bits &
1547                       policy->l_inodebits.bits))
1548                         continue;
1549
1550                 /* See CBPENDING comment in ldlm_cancel_lru */
1551                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
1552                                  lock_flags;
1553
1554                 LASSERT(list_empty(&lock->l_bl_ast));
1555                 list_add(&lock->l_bl_ast, cancels);
1556                 LDLM_LOCK_GET(lock);
1557                 count++;
1558         }
1559         unlock_res(res);
1560
1561         RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
1562 }
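/* Usage sketch (hypothetical): collect cancels for all unused locks on
 * @res that conflict with a PW lock, then send them in one batch:
 *
 *      CFS_LIST_HEAD(cancels);
 *      int n = ldlm_cancel_resource_local(res, &cancels, NULL, LCK_PW,
 *                                         0, 0, NULL);
 *      ldlm_cli_cancel_list(&cancels, n, NULL, 0);
 */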
1563
1564 /* If @req is NULL, send a CANCEL request to the server with the handles
1565  * of the locks in @cancels. If EARLY_CANCEL is not supported, send the
1566  * CANCEL requests separately, one per lock.
1567  * If @req is not NULL, put the handles of the locks in @cancels into the
1568  * request buffer.
1569  * Destroy @cancels at the end. */
1570 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
1571                          struct ptlrpc_request *req, int flags)
1572 {
1573         struct ldlm_lock *lock;
1574         int res = 0;
1575         ENTRY;
1576
1577         if (list_empty(cancels) || count == 0)
1578                 RETURN(0);
1579         
1580         /* XXX: requests (both batched and not) could be sent in parallel. 
1581          * Usually it is enough to have just 1 RPC, but it is possible that
1582          * there are too many locks to be cancelled in LRU or on a resource.
1583          * It would also speed up the case when the server does not support
1584          * the feature. */
1585         while (count > 0) {
1586                 LASSERT(!list_empty(cancels));
1587                 lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
1588                 LASSERT(lock->l_conn_export);
1589
1590                 if (exp_connect_cancelset(lock->l_conn_export)) {
1591                         res = count;
1592                         if (req)
1593                                 ldlm_cancel_pack(req, cancels, count);
1594                         else
1595                                 res = ldlm_cli_cancel_req(lock->l_conn_export,
1596                                                           cancels, count,
1597                                                           flags);
1598                 } else {
1599                         res = ldlm_cli_cancel_req(lock->l_conn_export,
1600                                                   cancels, 1, flags);
1601                 }
1602
1603                 if (res < 0) {
1604                         CERROR("ldlm_cli_cancel_list: %d\n", res);
1605                         res = count;
1606                 }
1607
1608                 count -= res;
1609                 ldlm_lock_list_put(cancels, l_bl_ast, res);
1610         }
1611         LASSERT(count == 0);
1612         RETURN(0);
1613 }
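/* Design note: when the export supports cancel sets, one RPC (or the
 * request buffer, when @req is given) carries up to @count handles at
 * once; otherwise the loop above degrades to one CANCEL RPC per lock,
 * which is why exp_connect_cancelset() is checked for each batch. */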
1614
1615 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1616                                     const struct ldlm_res_id *res_id,
1617                                     ldlm_policy_data_t *policy,
1618                                     ldlm_mode_t mode, int flags, void *opaque)
1619 {
1620         struct ldlm_resource *res;
1621         CFS_LIST_HEAD(cancels);
1622         int count;
1623         int rc;
1624         ENTRY;
1625
1626         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1627         if (res == NULL) {
1628                 /* This is not a problem. */
1629                 CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
1630                 RETURN(0);
1631         }
1632
1633         count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
1634                                            0, flags, opaque);
1635         rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
1636         if (rc != ELDLM_OK)
1637                 CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
1638
1639         ldlm_resource_putref(res);
1640         RETURN(0);
1641 }
1642
1643 static inline int have_no_nsresource(struct ldlm_namespace *ns)
1644 {
1645         int no_resource = 0;
1646         ENTRY;
1647         spin_lock(&ns->ns_hash_lock);
1648         if (ns->ns_resources == 0)
1649                 no_resource = 1;
1650         spin_unlock(&ns->ns_hash_lock);
1651
1652         RETURN(no_resource);
1653 }
1654
1655 /* Cancel all locks on a namespace (or a specific resource, if given)
1656  * that have 0 readers/writers.
1657  *
1658  * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
1659  * to notify the server. */
1660 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
1661                            const struct ldlm_res_id *res_id,
1662                            int flags, void *opaque)
1663 {
1664         int i;
1665         ENTRY;
1666
1667         if (ns == NULL)
1668                 RETURN(ELDLM_OK);
1669
1670         if (res_id)
1671                 RETURN(ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
1672                                                        LCK_MINMODE, flags,
1673                                                        opaque));
1674
1675         spin_lock(&ns->ns_hash_lock);
1676         for (i = 0; i < RES_HASH_SIZE; i++) {
1677                 struct list_head *tmp;
1678                 tmp = ns->ns_hash[i].next;
1679                 while (tmp != &(ns->ns_hash[i])) {
1680                         struct ldlm_resource *res;
1681                         int rc;
1682
1683                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
1684                         ldlm_resource_getref(res);
1685                         spin_unlock(&ns->ns_hash_lock);
1686
1687                         rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
1688                                                              NULL, LCK_MINMODE,
1689                                                              flags, opaque);
1690
1691                         if (rc)
1692                                 CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
1693                                        res->lr_name.name[0], rc);
1694
1695                         spin_lock(&ns->ns_hash_lock);
1696                         tmp = tmp->next;
1697                         ldlm_resource_putref_locked(res);
1698                 }
1699         }
1700         spin_unlock(&ns->ns_hash_lock);
1701
1702         RETURN(ELDLM_OK);
1703 }
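/* Usage sketch (hypothetical): drop every unused lock in the namespace
 * without notifying the server, e.g. when cleaning up after an eviction:
 *
 *      ldlm_cli_cancel_unused(ns, NULL, LDLM_FL_LOCAL_ONLY, NULL);
 *
 * Passing a non-NULL @res_id restricts the same sweep to one resource. */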
1704
1705 /* join/split resource locks to/from lru list */
1706 int ldlm_cli_join_lru(struct ldlm_namespace *ns,
1707                       const struct ldlm_res_id *res_id, int join)
1708 {
1709         struct ldlm_resource *res;
1710         struct ldlm_lock *lock, *n;
1711         int count = 0;
1712         ENTRY;
1713
1714         LASSERT(ns_is_client(ns));
1715
1716         res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
1717         if (res == NULL)
1718                 RETURN(count);
1719         LASSERT(res->lr_type == LDLM_EXTENT);
1720
1721         lock_res(res);
1722         if (!join)
1723                 goto split;
1724
1725         list_for_each_entry_safe (lock, n, &res->lr_granted, l_res_link) {
1726                 if (list_empty(&lock->l_lru) &&
1727                     !lock->l_readers && !lock->l_writers &&
1728                     !(lock->l_flags & LDLM_FL_LOCAL) &&
1729                     !(lock->l_flags & LDLM_FL_CBPENDING) &&
1730                     !(lock->l_flags & LDLM_FL_BL_AST)) {
1731                         ldlm_lock_add_to_lru(lock);
1732                         lock->l_flags &= ~LDLM_FL_NO_LRU;
1733                         LDLM_DEBUG(lock, "join lock to lru");
1734                         count++;
1735                 }
1736         }
1737         goto unlock;
1738 split:
1739         spin_lock(&ns->ns_unused_lock);
1740         list_for_each_entry_safe (lock, n, &ns->ns_unused_list, l_lru) {
1741                 if (lock->l_resource == res) {
1742                         ldlm_lock_remove_from_lru_nolock(lock);
1743                         lock->l_flags |= LDLM_FL_NO_LRU;
1744                         LDLM_DEBUG(lock, "split lock from lru");
1745                         count++;
1746                 }
1747         }
1748         spin_unlock(&ns->ns_unused_lock);
1749 unlock:
1750         unlock_res(res);
1751         ldlm_resource_putref(res);
1752         RETURN(count);
1753 }
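/* Usage sketch (hypothetical): temporarily exempt the extent locks of one
 * resource from lru cancellation while they are known to be hot:
 *
 *      ldlm_cli_join_lru(ns, &res_id, 0);    -- split: sets LDLM_FL_NO_LRU
 *      ...
 *      ldlm_cli_join_lru(ns, &res_id, 1);    -- join: lru-eligible again
 *
 * The resource must be of LDLM_EXTENT type, as asserted above. */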
1754
1755 /* Lock iterators. */
1756
1757 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
1758                           void *closure)
1759 {
1760         struct list_head *tmp, *next;
1761         struct ldlm_lock *lock;
1762         int rc = LDLM_ITER_CONTINUE;
1763
1764         ENTRY;
1765
1766         if (!res)
1767                 RETURN(LDLM_ITER_CONTINUE);
1768
1769         lock_res(res);
1770         list_for_each_safe(tmp, next, &res->lr_granted) {
1771                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1772
1773                 if (iter(lock, closure) == LDLM_ITER_STOP)
1774                         GOTO(out, rc = LDLM_ITER_STOP);
1775         }
1776
1777         list_for_each_safe(tmp, next, &res->lr_converting) {
1778                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1779
1780                 if (iter(lock, closure) == LDLM_ITER_STOP)
1781                         GOTO(out, rc = LDLM_ITER_STOP);
1782         }
1783
1784         list_for_each_safe(tmp, next, &res->lr_waiting) {
1785                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1786
1787                 if (iter(lock, closure) == LDLM_ITER_STOP)
1788                         GOTO(out, rc = LDLM_ITER_STOP);
1789         }
1790  out:
1791         unlock_res(res);
1792         RETURN(rc);
1793 }
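/* Example iterator (hypothetical helper, not part of this file): count the
 * locks on a resource using the callback protocol above:
 *
 *      static int ldlm_count_iter(struct ldlm_lock *lock, void *closure)
 *      {
 *              (*(int *)closure)++;
 *              return LDLM_ITER_CONTINUE;
 *      }
 *
 *      int n = 0;
 *      ldlm_resource_foreach(res, ldlm_count_iter, &n);
 *
 * Returning LDLM_ITER_STOP from the callback aborts the walk early. */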
1794
1795 struct iter_helper_data {
1796         ldlm_iterator_t iter;
1797         void *closure;
1798 };
1799
1800 static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
1801 {
1802         struct iter_helper_data *helper = closure;
1803         return helper->iter(lock, helper->closure);
1804 }
1805
1806 static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
1807 {
1808         return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
1809 }
1810
1811 int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
1812                            void *closure)
1813 {
1814         struct iter_helper_data helper = { .iter = iter, .closure = closure };
1815         return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
1816 }
1817
1818 int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
1819                                ldlm_res_iterator_t iter, void *closure)
1820 {
1821         int i, rc = LDLM_ITER_CONTINUE;
1822         struct ldlm_resource *res;
1823         struct list_head *tmp;
1824
1825         ENTRY;
1826         spin_lock(&ns->ns_hash_lock);
1827         for (i = 0; i < RES_HASH_SIZE; i++) {
1828                 tmp = ns->ns_hash[i].next;
1829                 while (tmp != &(ns->ns_hash[i])) {
1830                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
1831                         ldlm_resource_getref(res);
1832                         spin_unlock(&ns->ns_hash_lock);
1833
1834                         rc = iter(res, closure);
1835
1836                         spin_lock(&ns->ns_hash_lock);
1837                         tmp = tmp->next;
1838                         ldlm_resource_putref_locked(res);
1839                         if (rc == LDLM_ITER_STOP)
1840                                 GOTO(out, rc);
1841                 }
1842         }
1843  out:
1844         spin_unlock(&ns->ns_hash_lock);
1845         RETURN(rc);
1846 }
1847
1848 /* non-blocking function to manipulate a lock whose cb_data is being put away. */
1849 void ldlm_resource_iterate(struct ldlm_namespace *ns,
1850                            const struct ldlm_res_id *res_id,
1851                            ldlm_iterator_t iter, void *data)
1852 {
1853         struct ldlm_resource *res;
1854         ENTRY;
1855
1856         if (ns == NULL) {
1857                 CERROR("must pass in namespace\n");
1858                 LBUG();
1859         }
1860
1861         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1862         if (res == NULL) {
1863                 EXIT;
1864                 return;
1865         }
1866
1867         ldlm_resource_foreach(res, iter, data);
1868         ldlm_resource_putref(res);
1869         EXIT;
1870 }
1871
1872 /* Lock replay */
1873
1874 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
1875 {
1876         struct list_head *list = closure;
1877
1878         /* we use l_pending_chain here, because it's unused on clients. */
1879         LASSERTF(list_empty(&lock->l_pending_chain), "lock %p next %p prev %p\n",
1880                  lock, &lock->l_pending_chain.next, &lock->l_pending_chain.prev);
1881         /* bug 9573: don't replay locks left after eviction */
1882         if (!(lock->l_flags & LDLM_FL_FAILED))
1883                 list_add(&lock->l_pending_chain, list);
1884         return LDLM_ITER_CONTINUE;
1885 }
1886
1887 static int replay_lock_interpret(struct ptlrpc_request *req,
1888                                  struct ldlm_async_args *aa, int rc)
1889 {
1890         struct ldlm_lock  *lock;
1891         struct ldlm_reply *reply;
1892
1893         ENTRY;
1894         atomic_dec(&req->rq_import->imp_replay_inflight);
1895         if (rc != ELDLM_OK)
1896                 GOTO(out, rc);
1897
1898
1899         reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1900         if (reply == NULL)
1901                 GOTO(out, rc = -EPROTO);
1902
1903         lock = ldlm_handle2lock(&aa->lock_handle);
1904         if (!lock) {
1905                 CERROR("received replay ack for unknown local cookie "LPX64
1906                        " remote cookie "LPX64 " from server %s id %s\n",
1907                        aa->lock_handle.cookie, reply->lock_handle.cookie,
1908                        req->rq_export->exp_client_uuid.uuid,
1909                        libcfs_id2str(req->rq_peer));
1910                 GOTO(out, rc = -ESTALE);
1911         }
1912
1913         lock->l_remote_handle = reply->lock_handle;
1914         LDLM_DEBUG(lock, "replayed lock:");
1915         ptlrpc_import_recovery_state_machine(req->rq_import);
1916         LDLM_LOCK_PUT(lock);
1917 out:
1918         if (rc != ELDLM_OK)
1919                 ptlrpc_connect_import(req->rq_import, NULL);
1920
1921
1922         RETURN(rc);
1923 }
1924
1925 static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1926 {
1927         struct ptlrpc_request *req;
1928         struct ldlm_async_args *aa;
1929         struct ldlm_request   *body;
1930         int flags;
1931         ENTRY;
1932
1933
1934         /* Bug 11974: Do not replay a lock which is actively being canceled */
1935         if (lock->l_flags & LDLM_FL_CANCELING) {
1936                 LDLM_DEBUG(lock, "Not replaying canceled lock:");
1937                 RETURN(0);
1938         }
1939
1940         /* If this is a reply-less callback lock, we cannot replay it: the
1941          * server might have dropped it long ago, with notification of that
1942          * event lost by the network (and a conflicting lock already granted). */
1943         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
1944                 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
1945                 ldlm_lock_cancel(lock);
1946                 RETURN(0);
1947         }
1948         /*
1949          * If granted mode matches the requested mode, this lock is granted.
1950          *
1951          * If they differ, but we have a granted mode, then we were granted
1952          * one mode and now want another: ergo, converting.
1953          *
1954          * If we haven't been granted anything and are on a resource list,
1955          * then we're blocked/waiting.
1956          *
1957          * If we haven't been granted anything and we're NOT on a resource list,
1958          * then we haven't got a reply yet and don't have a known disposition.
1959          * This happens whenever a lock enqueue is the request that triggers
1960          * recovery.
1961          */
1962         if (lock->l_granted_mode == lock->l_req_mode)
1963                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
1964         else if (lock->l_granted_mode)
1965                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
1966         else if (!list_empty(&lock->l_res_link))
1967                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
1968         else
1969                 flags = LDLM_FL_REPLAY;
1970
1971         req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
1972                                         LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
1973         if (req == NULL)
1974                 RETURN(-ENOMEM);
1975
1976         /* We're part of recovery, so don't wait for it. */
1977         req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
1978
1979         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1980         ldlm_lock2desc(lock, &body->lock_desc);
1981         body->lock_flags = flags;
1982
1983         ldlm_lock2handle(lock, &body->lock_handle[0]);
1984         if (lock->l_lvb_len != 0) {
1985                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
1986                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1987                                      lock->l_lvb_len);
1988         }
1989         ptlrpc_request_set_replen(req);
1990         /* Notify the server we've replayed all requests.
1991          * Also, we mark the request to be put on a dedicated
1992          * queue to be processed after all request replays.
1993          * Bug 6063. */
1994         lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
1995
1996         LDLM_DEBUG(lock, "replaying lock:");
1997
1998         atomic_inc(&req->rq_import->imp_replay_inflight);
1999         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2000         aa = (struct ldlm_async_args *)&req->rq_async_args;
2001         aa->lock_handle = body->lock_handle[0];
2002         req->rq_interpret_reply = replay_lock_interpret;
2003         ptlrpcd_add_req(req);
2004
2005         RETURN(0);
2006 }
2007
2008 int ldlm_replay_locks(struct obd_import *imp)
2009 {
2010         struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
2011         CFS_LIST_HEAD(list);
2012         struct ldlm_lock *lock, *next;
2013         int rc = 0;
2014
2015         ENTRY;
2016
2017         LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
2018
2019         /* ensure this doesn't fall to 0 before all have been queued */
2020         atomic_inc(&imp->imp_replay_inflight);
2021
2022         (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
2023
2024         list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
2025                 list_del_init(&lock->l_pending_chain);
2026                 if (rc)
2027                         continue; /* or try to do the rest? */
2028                 rc = replay_one_lock(imp, lock);
2029         }
2030
2031         atomic_dec(&imp->imp_replay_inflight);
2032
2033         RETURN(rc);
2034 }
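/* Usage note: this is intended to run while the import is recovering in
 * the LUSTRE_IMP_REPLAY_LOCKS state (the send state set in
 * replay_one_lock() above); each queued replay completes through
 * replay_lock_interpret(), which drops imp_replay_inflight and, on
 * success, advances ptlrpc_import_recovery_state_machine(). */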