/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/**
 * This file contains Asynchronous System Trap (AST) handlers and related
 * LDLM request-processing routines.
 *
 * An AST is a callback issued on a lock when its state is changed. There are
 * several different types of ASTs (callbacks) registered for each lock:
 *
 * - completion AST: when a lock is enqueued by some process, but cannot be
 *   granted immediately due to other conflicting locks on the same resource,
 *   the completion AST is sent to notify the caller when the lock is
 *   eventually granted
 *
 * - blocking AST: when a lock is granted to some process, if another process
 *   enqueues a conflicting (blocking) lock on a resource, a blocking AST is
 *   sent to notify the holder(s) of the lock(s) of the conflicting lock
 *   request. The lock holder(s) must release their lock(s) on that resource in
 *   a timely manner or be evicted by the server.
 *
 * - glimpse AST: this is used when a process wants information about a lock
 *   (i.e. the lock value block (LVB)) but does not necessarily require holding
 *   the lock. If the resource is locked, the lock holder(s) are sent glimpse
 *   ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
 *   their lock(s) if they are idle. If the resource is not locked, the server
 *   may grant the lock.
 */
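
/*
 * For illustration, the three AST types above are wired into a lock at
 * creation time through struct ldlm_callback_suite (see ldlm_cli_enqueue()
 * and ldlm_cli_enqueue_local() below); a minimal sketch:
 *
 *      const struct ldlm_callback_suite cbs = {
 *              .lcs_completion = ldlm_completion_ast,
 *              .lcs_blocking   = ldlm_blocking_ast,
 *              .lcs_glimpse    = ldlm_glimpse_ast,
 *      };
 *      lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data,
 *                              lvb_len, lvb_type);
 */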

#define DEBUG_SUBSYSTEM S_LDLM

#include <lustre_errno.h>
#include <lustre_dlm.h>
#include <obd_class.h>
#include <obd.h>

#include "ldlm_internal.h"

unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
module_param(ldlm_enqueue_min, uint, 0644);
MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
EXPORT_SYMBOL(ldlm_enqueue_min);
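
/*
 * Being a module parameter with 0644 permissions, ldlm_enqueue_min can be
 * tuned at runtime through sysfs; assuming the usual build where the ldlm
 * code is linked into ptlrpc.ko, that would be, e.g.:
 *
 *      echo 120 > /sys/module/ptlrpc/parameters/ldlm_enqueue_min
 */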

/* On the client side, whether the cached locks will be canceled before replay */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32             lwd_conn_cnt;
};

struct ldlm_async_args {
        struct lustre_handle lock_handle;
};

/**
 * ldlm_request_bufsize
 *
 * If opcode=LDLM_ENQUEUE, 1 slot is already occupied, so
 * LDLM_LOCKREQ_HANDLES - 1 slots are available.
 * Otherwise, LDLM_LOCKREQ_HANDLES slots are available.
 *
 * \param[in] count     number of lock handles to pack into the request
 * \param[in] type      LDLM opcode (e.g. LDLM_ENQUEUE or LDLM_CANCEL)
 *
 * \retval size of the request buffer
 */
int ldlm_request_bufsize(int count, int type)
{
        int avail = LDLM_LOCKREQ_HANDLES;

        if (type == LDLM_ENQUEUE)
                avail -= LDLM_ENQUEUE_CANCEL_OFF;

        if (count > avail)
                avail = (count - avail) * sizeof(struct lustre_handle);
        else
                avail = 0;

        return sizeof(struct ldlm_request) + avail;
}
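
/*
 * A worked example (assuming the current definitions, where
 * LDLM_LOCKREQ_HANDLES == 2, LDLM_ENQUEUE_CANCEL_OFF == 1 and a
 * lustre_handle is a single __u64 cookie):
 *
 *      ldlm_request_bufsize(5, LDLM_ENQUEUE)
 *              => avail = 2 - 1 = 1 slot free in the base request
 *              => 4 extra handles must be appended
 *              => sizeof(struct ldlm_request) +
 *                 4 * sizeof(struct lustre_handle)
 */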

void ldlm_expired_completion_wait(struct lock_wait_data *lwd)
{
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        ENTRY;
        if (lock->l_conn_export == NULL) {
                static time64_t next_dump, last_dump;

                LDLM_ERROR(lock,
                           "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
                           lock->l_activity,
                           ktime_get_real_seconds() - lock->l_activity);
                if (ktime_get_seconds() > next_dump) {
                        last_dump = next_dump;
                        next_dump = ktime_get_seconds() + 300;
                        ldlm_namespace_dump(D_DLMTRACE,
                                            ldlm_lock_to_ns(lock));
                        if (last_dump == 0)
                                libcfs_debug_dumplog();
                }
                RETURN_EXIT;
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
        LDLM_ERROR(lock,
                   "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
                   lock->l_activity,
                   ktime_get_real_seconds() - lock->l_activity,
                   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);

        EXIT;
}

int is_granted_or_cancelled_nolock(struct ldlm_lock *lock)
{
        int ret = 0;

        check_res_locked(lock->l_resource);
        if (ldlm_is_granted(lock) && !ldlm_is_cp_reqd(lock))
                ret = 1;
        else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
                ret = 1;
        return ret;
}
EXPORT_SYMBOL(is_granted_or_cancelled_nolock);

/**
 * Calculate the Completion timeout (covering enqueue, BL AST, data flush,
 * lock cancel, and their replies). Used for lock completion timeout on the
 * client side.
 *
 * \param[in] lock      lock which is waiting the completion callback
 *
 * \retval              timeout in seconds to wait for the server reply
 */
/*
 * We use the same basis for both server side and client side functions
 * from a single node.
 */
static timeout_t ldlm_cp_timeout(struct ldlm_lock *lock)
{
        timeout_t timeout;

        if (AT_OFF)
                return obd_timeout;

        /*
         * Wait a long time for enqueue - server may have to callback a
         * lock from another client.  Server will evict the other client if it
         * doesn't respond reasonably, and then give us the lock.
         */
        timeout = at_get(ldlm_lock_to_ns_at(lock));
        return max(3 * timeout, (timeout_t)ldlm_enqueue_min);
}
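
/*
 * For instance, if the namespace's adaptive-timeout estimate at_get()
 * currently reports 10 seconds and ldlm_enqueue_min is at its default of
 * OBD_TIMEOUT_DEFAULT (100 seconds, unless tuned), the computed completion
 * timeout is max(3 * 10, 100) = 100 seconds; the tripled AT estimate only
 * dominates once it exceeds the configured minimum.
 */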

/**
 * Helper function for ldlm_completion_ast(), updating timings when lock is
 * actually granted.
 */
static int ldlm_completion_tail(struct ldlm_lock *lock, void *data)
{
        int result = 0;

        if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
                LDLM_DEBUG(lock, "client-side enqueue: destroyed");
                result = -EIO;
        } else if (data == NULL) {
                LDLM_DEBUG(lock, "client-side enqueue: granted");
        } else {
                /* Only count the CP RPC in AT, not immediately granted locks */
                timeout_t delay = 0;

                /* Discard negative timeouts. We should also limit the
                 * maximum value of the timeout.
                 */
                if (ktime_get_real_seconds() > lock->l_activity)
                        delay = ktime_get_real_seconds() - lock->l_activity;

                LDLM_DEBUG(lock, "client-side enqueue: granted after %ds",
                           delay);
                /* Update our time estimate */
                at_measured(ldlm_lock_to_ns_at(lock), delay);
        }
        return result;
}

/**
 * Implementation of ->l_completion_ast() for a client, that doesn't wait
 * until lock is granted. Suitable for locks enqueued through ptlrpcd, or
 * other threads that cannot block for long.
 */
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                RETURN(0);
        }

        if (!(flags & LDLM_FL_BLOCKED_MASK)) {
                wake_up(&lock->l_waitq);
                RETURN(ldlm_completion_tail(lock, data));
        }

        LDLM_DEBUG(lock,
                   "client-side enqueue returned a blocked lock, going forward");
        ldlm_reprocess_all(lock->l_resource, NULL);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_completion_ast_async);

/**
 * Generic LDLM "completion" AST. This is called in several cases:
 *
 *     - when a reply to an ENQUEUE RPC is received from the server
 *       (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
 *       this point (determined by flags);
 *
 *     - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
 *       been granted;
 *
 *     - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
 *       gets correct lvb;
 *
 *     - to force all locks when resource is destroyed (cleanup_resource());
 *
 * If lock is not granted in the first case, this function waits until second
 * or penultimate cases happen in some other thread.
 */
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        timeout_t timeout;
        int rc = 0;

        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                goto noreproc;
        }

        if (!(flags & LDLM_FL_BLOCKED_MASK)) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");

noreproc:

        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        timeout = ldlm_cp_timeout(lock);

        lwd.lwd_lock = lock;
        lock->l_activity = ktime_get_real_seconds();

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
                spin_unlock(&imp->imp_lock);
        }

        if (ns_is_client(ldlm_lock_to_ns(lock)) &&
            OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
                                 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
                ldlm_set_fail_loc(lock);
                rc = -EINTR;
        } else {
                /* Go to sleep until the lock is granted or cancelled. */
                if (ldlm_is_no_timeout(lock)) {
                        LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                        rc = l_wait_event_abortable(
                                lock->l_waitq,
                                is_granted_or_cancelled(lock));
                } else {
                        if (wait_event_idle_timeout(
                                    lock->l_waitq,
                                    is_granted_or_cancelled(lock),
                                    cfs_time_seconds(timeout)) == 0) {
                                ldlm_expired_completion_wait(&lwd);
                                rc = l_wait_event_abortable(
                                        lock->l_waitq,
                                        is_granted_or_cancelled(lock));
                        }
                }
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        RETURN(ldlm_completion_tail(lock, data));
}
EXPORT_SYMBOL(ldlm_completion_ast);

/**
 * A helper to build a blocking AST function
 *
 * Perform a common operation for blocking ASTs:
 * deferred lock cancellation.
 *
 * \param lock the lock blocking or canceling AST was called on
 * \retval 0
 * \see mdt_blocking_ast
 * \see ldlm_blocking_ast
 */
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
{
        int do_ast;

        ENTRY;

        ldlm_set_cbpending(lock);
        do_ast = (!lock->l_readers && !lock->l_writers);
        unlock_res_and_lock(lock);

        if (do_ast) {
                struct lustre_handle lockh;
                int rc;

                LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                if (rc < 0)
                        CERROR("ldlm_cli_cancel: %d\n", rc);
        } else {
                LDLM_DEBUG(lock,
                           "Lock still has references, will be cancelled later");
        }
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);

/**
 * Server blocking AST
 *
 * ->l_blocking_ast() callback for LDLM locks acquired by server-side
 * OBDs.
 *
 * \param lock the lock which blocks a request or a cancelling lock
 * \param desc unused
 * \param data unused
 * \param flag indicates whether this is a cancelling or a blocking callback
 * \retval 0
 * \see ldlm_blocking_ast_nocheck
 */
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag)
{
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        lock_res_and_lock(lock);
        /*
         * Get this: if ldlm_blocking_ast is racing with intent_policy, such
         * that ldlm_blocking_ast is called just before intent_policy method
         * takes the lr_lock, then by the time we get the lock, we might not
         * be the correct blocking function anymore.  So check, and return
         * early, if so.
         */
        if (lock->l_blocking_ast != ldlm_blocking_ast) {
                unlock_res_and_lock(lock);
                RETURN(0);
        }
        RETURN(ldlm_blocking_ast_nocheck(lock));
}
EXPORT_SYMBOL(ldlm_blocking_ast);

/**
 * Implements ldlm_lock::l_glimpse_ast for extent locks acquired on the server.
 *
 * Returning -ELDLM_NO_LOCK_DATA actually works, but the reason for that is
 * rather subtle: with OST-side locking, it may so happen that _all_ extent
 * locks are held by the OST. If client wants to obtain the current file size
 * it calls ll_glimpse_size(), and (as all locks are held only on the server),
 * this dummy glimpse callback fires and does nothing. The client still
 * receives the correct file size due to the following fragment of code in
 * ldlm_cb_interpret():
 *
 *      if (rc == -ELDLM_NO_LOCK_DATA) {
 *              LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
 *              ldlm_res_lvbo_update(lock->l_resource, NULL, 1);
 *      }
 *
 * That is, after the glimpse returns this error, ofd_lvbo_update() is called
 * and returns the updated file attributes from the inode to the client.
 *
 * See also comment in ofd_intent_policy() on why servers must set a non-NULL
 * l_glimpse_ast when grabbing DLM locks.  Otherwise, the server will assume
 * that the object is in the process of being destroyed.
 *
 * \param[in] lock      DLM lock being glimpsed, unused
 * \param[in] reqp      pointer to ptlrpc_request, unused
 *
 * \retval              -ELDLM_NO_LOCK_DATA to get attributes from disk object
 */
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp)
{
        return -ELDLM_NO_LOCK_DATA;
}

/**
 * Enqueue a local lock (typically on a server).
 */
int ldlm_cli_enqueue_local(const struct lu_env *env,
                           struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           enum ldlm_type type, union ldlm_policy_data *policy,
                           enum ldlm_mode mode, __u64 *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len, enum lvb_type lvb_type,
                           const __u64 *client_cookie,
                           struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        const struct ldlm_callback_suite cbs = { .lcs_completion = completion,
                                                 .lcs_blocking   = blocking,
                                                 .lcs_glimpse    = glimpse,
        };

        ENTRY;

        LASSERT(!(*flags & LDLM_FL_REPLAY));
        if (unlikely(ns_is_client(ns))) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len,
                                lvb_type);
        if (IS_ERR(lock))
                GOTO(out_nolock, err = PTR_ERR(lock));

        err = ldlm_lvbo_init(lock->l_resource);
        if (err < 0) {
                LDLM_ERROR(lock, "delayed lvb init failed (rc %d)", err);
                ldlm_lock_destroy_nolock(lock);
                GOTO(out, err);
        }

        ldlm_lock2handle(lock, lockh);

        /*
         * NB: we don't have any lock now (lock_res_and_lock)
         * because it's a new lock
         */
        ldlm_lock_addref_internal_nolock(lock, mode);
        ldlm_set_local(lock);
        if (*flags & LDLM_FL_ATOMIC_CB)
                ldlm_set_atomic_cb(lock);

        if (*flags & LDLM_FL_CANCEL_ON_BLOCK)
                ldlm_set_cancel_on_block(lock);

        if (policy != NULL)
                lock->l_policy_data = *policy;
        if (client_cookie != NULL)
                lock->l_client_cookie = *client_cookie;
        if (type == LDLM_EXTENT) {
                /* extent lock without policy is a bug */
                if (policy == NULL)
                        LBUG();

                lock->l_req_extent = policy->l_extent;
        }

        err = ldlm_lock_enqueue(env, ns, &lock, policy, flags);
        if (unlikely(err != ELDLM_OK))
                GOTO(out, err);

        if (policy != NULL)
                *policy = lock->l_policy_data;

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags, NULL);

        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
        EXIT;
 out:
        LDLM_LOCK_RELEASE(lock);
 out_nolock:
        return err;
}
EXPORT_SYMBOL(ldlm_cli_enqueue_local);

static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock, int mode)
{
        int need_cancel = 0;

        /* Set a flag to prevent us from sending a CANCEL (b=407) */
        lock_res_and_lock(lock);
        /* Check that lock is not granted or failed, we might race. */
        if (!ldlm_is_granted(lock) && !ldlm_is_failed(lock)) {
                /*
                 * Make sure that this lock will not be found by raced
                 * bl_ast and -EINVAL reply is sent to server anyways.
                 * b=17645
                 */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
                                 LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
                need_cancel = 1;
        }
        unlock_res_and_lock(lock);

        if (need_cancel)
                LDLM_DEBUG(lock,
                           "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
        else
                LDLM_DEBUG(lock, "lock was granted or failed in race");

        /*
         * XXX - HACK because we shouldn't call ldlm_lock_destroy()
         *       from llite/file.c/ll_file_flock().
         */
        /*
         * This code accounts for the fact that we do not have a blocking
         * handler on the client for flock locks. As such, this is the place
         * where we must completely kill failed locks (those that were
         * interrupted, and those that were waiting to be granted when the
         * server evicted us).
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK) {
                lock_res_and_lock(lock);
                if (!ldlm_is_destroyed(lock)) {
                        ldlm_resource_unlink_lock(lock);
                        ldlm_lock_decref_internal_nolock(lock, mode);
                        ldlm_lock_destroy_nolock(lock);
                }
                unlock_res_and_lock(lock);
        } else {
                ldlm_lock_decref_internal(lock, mode);
        }
}

static bool ldlm_request_slot_needed(enum ldlm_type type)
{
        return type == LDLM_FLOCK || type == LDLM_IBITS;
}

/**
 * Finishing portion of client lock enqueue code.
 *
 * Called after receiving reply from server.
 */
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          enum ldlm_type type, __u8 with_policy,
                          enum ldlm_mode mode, __u64 *flags, void *lvb,
                          __u32 lvb_len, const struct lustre_handle *lockh,
                          int rc)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        const struct lu_env *env = NULL;
        int is_replay = *flags & LDLM_FL_REPLAY;
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;
        int cleanup_phase = 1;

        ENTRY;

        if (ldlm_request_slot_needed(type))
                obd_put_request_slot(&req->rq_import->imp_obd->u.cli);

        ptlrpc_put_mod_rpc_slot(req);

        if (req && req->rq_svc_thread)
                env = req->rq_svc_thread->t_env;

        lock = ldlm_handle2lock(lockh);
        /* ldlm_cli_enqueue is holding a reference on this lock. */
        if (!lock) {
                LASSERT(type == LDLM_FLOCK);
                RETURN(-ENOLCK);
        }

        LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
                 "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");

                if (rc != ELDLM_LOCK_ABORTED)
                        GOTO(cleanup, rc);
        }

        /* Before we return, swab the reply */
        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        if (reply == NULL)
                GOTO(cleanup, rc = -EPROTO);

        if (lvb_len > 0) {
                int size = 0;

                size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                            RCL_SERVER);
                if (size < 0) {
                        LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size);
                        GOTO(cleanup, rc = size);
                } else if (unlikely(size > lvb_len)) {
                        LDLM_ERROR(lock,
                                   "Replied LVB is larger than expectation, expected = %d, replied = %d",
                                   lvb_len, size);
                        GOTO(cleanup, rc = -EINVAL);
                }
                lvb_len = size;
        }

        if (rc == ELDLM_LOCK_ABORTED) {
                if (lvb_len > 0 && lvb != NULL)
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lvb, lvb_len);
                GOTO(cleanup, rc = rc ? : ELDLM_LOCK_ABORTED);
        }

        /* lock enqueued on the server */
        cleanup_phase = 0;

        lock_res_and_lock(lock);
        /* Key change rehash lock in per-export hash with new key */
        if (exp->exp_lock_hash) {
                /*
                 * In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp()
                 */
                /* coverity[overrun-buffer-val] */
                cfs_hash_rehash_key(exp->exp_lock_hash,
                                    &lock->l_remote_handle,
                                    &reply->lock_handle,
                                    &lock->l_exp_hash);
        } else {
                lock->l_remote_handle = reply->lock_handle;
        }

        *flags = ldlm_flags_from_wire(reply->lock_flags);
        lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
                                              LDLM_FL_INHERIT_MASK);
        unlock_res_and_lock(lock);

        CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: %#llx\n",
               lock, reply->lock_handle.cookie, *flags);

        /*
         * If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again.
         */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;

                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
                                 &lock->l_resource->lr_name)) {
                        CDEBUG(D_INFO,
                               "remote intent success, locking "DLDLMRES", instead of "DLDLMRES"\n",
                               PLDLMRES(&reply->lock_desc.l_resource),
                               PLDLMRES(lock->l_resource));

                        rc = ldlm_lock_change_resource(ns, lock,
                                        &reply->lock_desc.l_resource.lr_name);
                        if (rc || lock->l_resource == NULL)
                                GOTO(cleanup, rc = -ENOMEM);
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }

                if (with_policy) {
                        /* We assume lock type cannot change on server */
                        ldlm_convert_policy_to_local(exp,
                                                lock->l_resource->lr_type,
                                                &reply->lock_desc.l_policy_data,
                                                &lock->l_policy_data);
                }

                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock,
                                   "client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT) {
                lock_res_and_lock(lock);
                ldlm_bl_desc2lock(&reply->lock_desc, lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /*
         * If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one.
         */
        if (lvb_len > 0) {
                /*
                 * We must lock or a racing completion might update lvb without
                 * letting us know and we'll clobber the correct value.
                 * Cannot unlock after the check either, as that still leaves
                 * a tiny window for completion to get in.
                 */
                lock_res_and_lock(lock);
                if (!ldlm_is_granted(lock))
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lock->l_lvb_data, lvb_len);
                unlock_res_and_lock(lock);
                if (rc < 0) {
                        cleanup_phase = 1;
                        GOTO(cleanup, rc);
                }
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(env, ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);

                        if (!rc)
                                rc = err;
                        if (rc)
                                cleanup_phase = 1;
                }
        }

        if (lvb_len > 0 && lvb != NULL) {
                /*
                 * Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply
                 */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
cleanup:
        if (cleanup_phase == 1 && rc)
                failed_lock_cleanup(ns, lock, mode);
        /* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
        LDLM_LOCK_PUT(lock);
        LDLM_LOCK_RELEASE(lock);
        return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);

/**
 * Estimate number of lock handles that would fit into request of given
 * size.  PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
 * a single page on the send/receive side. XXX: 512 should be changed to
 * a more adequate value.
 */
static inline int ldlm_req_handles_avail(int req_size, int off)
{
        int avail;

        avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
        if (likely(avail >= 0))
                avail /= (int)sizeof(struct lustre_handle);
        else
                avail = 0;
        avail += LDLM_LOCKREQ_HANDLES - off;

        return avail;
}
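
/*
 * For example (assuming PAGE_SIZE == 4096, LDLM_MAXREQSIZE >= 3584,
 * LDLM_LOCKREQ_HANDLES == 2, and a lustre_handle being a single __u64
 * cookie), a request already occupying 1024 bytes with off == 0 yields:
 *
 *      avail = (3584 - 1024) / 8 + 2 = 322 handles
 */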

static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
                                             enum req_location loc,
                                             int off)
{
        __u32 size = req_capsule_msg_size(pill, loc);

        return ldlm_req_handles_avail(size, off);
}

static inline int ldlm_format_handles_avail(struct obd_import *imp,
                                            const struct req_format *fmt,
                                            enum req_location loc, int off)
{
        __u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);

        return ldlm_req_handles_avail(size, off);
}

/**
 * Cancel LRU locks and pack them into the enqueue request. Also pack the
 * given \a count locks in \a cancels.
 *
 * This is to be called by functions preparing their own requests that
 * might contain lists of locks to cancel in addition to actual operation
 * that needs to be performed.
 */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      struct list_head *cancels, int count)
{
        struct ldlm_namespace   *ns = exp->exp_obd->obd_namespace;
        struct req_capsule      *pill = &req->rq_pill;
        struct ldlm_request     *dlm = NULL;
        LIST_HEAD(head);
        enum ldlm_lru_flags lru_flags;
        int avail, to_free, pack = 0;
        int rc;

        ENTRY;

        if (cancels == NULL)
                cancels = &head;
        if (ns_connect_cancelset(ns)) {
                /* Estimate the amount of available space in the request. */
                req_capsule_filled_sizes(pill, RCL_CLIENT);
                avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);

                lru_flags = LDLM_LRU_FLAG_NO_WAIT | (ns_connect_lru_resize(ns) ?
                        LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED);
                to_free = !ns_connect_lru_resize(ns) &&
                        opc == LDLM_ENQUEUE ? 1 : 0;

                /*
                 * Cancel LRU locks here _only_ if the server supports
                 * EARLY_CANCEL. Otherwise we have to send an extra CANCEL
                 * RPC, which will make us slower.
                 */
                if (avail > count)
                        count += ldlm_cancel_lru_local(ns, cancels, to_free,
                                                       avail - count, 0,
                                                       lru_flags);
                if (avail > count)
                        pack = count;
                else
                        pack = avail;
                req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(pack, opc));
        }

        rc = ptlrpc_request_pack(req, version, opc);
        if (rc) {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
                RETURN(rc);
        }

        if (ns_connect_cancelset(ns)) {
                if (canceloff) {
                        dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
                        LASSERT(dlm);
                        /*
                         * Skip the first lock handle in ldlm_request_pack();
                         * this method will increment @lock_count according
                         * to the number of lock handles actually written to
                         * the buffer.
                         */
                        dlm->lock_count = canceloff;
                }
                /* Pack into the request @pack lock handles. */
                ldlm_cli_cancel_list(cancels, pack, req, 0);
                /* Prepare and send separate cancel RPC for others. */
                ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
        } else {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
        }
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_prep_elc_req);

int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
                          struct list_head *cancels, int count)
{
        return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                 LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);

struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len)
{
        struct ptlrpc_request *req;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
        if (req == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(ERR_PTR(rc));
        }

        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
        ptlrpc_request_set_replen(req);
        RETURN(req);
}
EXPORT_SYMBOL(ldlm_enqueue_pack);

/**
 * Client-side lock enqueue.
 *
 * If a request has some specific initialisation it is passed in \a reqp,
 * otherwise it is created in ldlm_cli_enqueue.
 *
 * Supports sync and async requests, pass \a async flag accordingly. If a
 * request was created in ldlm_cli_enqueue and it is the async request,
 * pass it to the caller in \a reqp.
 */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     union ldlm_policy_data const *policy, __u64 *flags,
                     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
                     struct lustre_handle *lockh, int async)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock      *lock;
        struct ldlm_request   *body;
        int                    is_replay = *flags & LDLM_FL_REPLAY;
        int                    req_passed_in = 1;
        int                    rc, err;
        struct ptlrpc_request *req;

        ENTRY;

        LASSERT(exp != NULL);

        ns = exp->exp_obd->obd_namespace;

        /*
         * If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice.
         */
        if (is_replay) {
                lock = ldlm_handle2lock_long(lockh, 0);
                LASSERT(lock != NULL);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                const struct ldlm_callback_suite cbs = {
                        .lcs_completion = einfo->ei_cb_cp,
                        .lcs_blocking   = einfo->ei_cb_bl,
                        .lcs_glimpse    = einfo->ei_cb_gl
                };
                lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                        einfo->ei_mode, &cbs, einfo->ei_cbdata,
                                        lvb_len, lvb_type);
                if (IS_ERR(lock))
                        RETURN(PTR_ERR(lock));

                if (einfo->ei_cb_created)
                        einfo->ei_cb_created(lock);

                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, einfo->ei_mode);
                ldlm_lock2handle(lock, lockh);
                if (policy != NULL)
                        lock->l_policy_data = *policy;

                if (einfo->ei_type == LDLM_EXTENT) {
                        /* extent lock without policy is a bug */
                        if (policy == NULL)
                                LBUG();

                        lock->l_req_extent = policy->l_extent;
                }
                LDLM_DEBUG(lock, "client-side enqueue START, flags %#llx",
                           *flags);
        }

        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = einfo->ei_cb_bl;
        lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL |
                                    LDLM_FL_ATOMIC_CB));
        lock->l_activity = ktime_get_real_seconds();

        /* lock not sent to server yet */
        if (reqp == NULL || *reqp == NULL) {
                req = ldlm_enqueue_pack(exp, lvb_len);
                if (IS_ERR(req)) {
                        failed_lock_cleanup(ns, lock, einfo->ei_mode);
                        LDLM_LOCK_RELEASE(lock);
                        RETURN(PTR_ERR(req));
                }

                req_passed_in = 0;
                if (reqp)
                        *reqp = req;
        } else {
                int len;

                req = *reqp;
                len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
                                           RCL_CLIENT);
                LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
                         DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
        }

        if (*flags & LDLM_FL_NDELAY) {
                DEBUG_REQ(D_DLMTRACE, req, "enqueue lock with no delay");
                req->rq_no_resend = req->rq_no_delay = 1;
                /*
                 * probably set a shorter timeout value and handle ETIMEDOUT
                 * in osc_lock_upcall() correctly
                 */
                /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
        }

        /* Dump lock data into the request buffer */
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = ldlm_flags_to_wire(*flags);
        body->lock_handle[0] = *lockh;

        /* extended LDLM opcodes in client stats */
        if (exp->exp_obd->obd_svc_stats != NULL) {
                bool glimpse = *flags & LDLM_FL_HAS_INTENT;

                /* OST glimpse has no intent buffer */
                if (req_capsule_has_field(&req->rq_pill, &RMF_LDLM_INTENT,
                                          RCL_CLIENT)) {
                        struct ldlm_intent *it;

                        it = req_capsule_client_get(&req->rq_pill,
                                                    &RMF_LDLM_INTENT);
                        glimpse = (it && (it->opc == IT_GLIMPSE));
                }

                if (!glimpse)
                        ldlm_svc_get_eopc(body, exp->exp_obd->obd_svc_stats);
                else
                        lprocfs_counter_incr(exp->exp_obd->obd_svc_stats,
                                             PTLRPC_LAST_CNTR +
                                             LDLM_GLIMPSE_ENQUEUE);
        }

        /*
         * It is important to obtain the modify RPC slot first (if applicable),
         * so that threads waiting for a modify RPC slot do not pollute our
         * RPCs-in-flight counter.
         */
        if (einfo->ei_enq_slot)
                ptlrpc_get_mod_rpc_slot(req);

        if (ldlm_request_slot_needed(einfo->ei_type)) {
                rc = obd_get_request_slot(&req->rq_import->imp_obd->u.cli);
                if (rc) {
                        if (einfo->ei_enq_slot)
                                ptlrpc_put_mod_rpc_slot(req);
                        failed_lock_cleanup(ns, lock, einfo->ei_mode);
                        LDLM_LOCK_RELEASE(lock);
                        GOTO(out, rc);
                }
        }

        if (async) {
                LASSERT(reqp != NULL);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "sending request");

        rc = ptlrpc_queue_wait(req);

        err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
                                    einfo->ei_mode, flags, lvb, lvb_len,
                                    lockh, rc);

        /*
         * If ldlm_cli_enqueue_fini did not find the lock, we need to free
         * one reference that we took
         */
        if (err == -ENOLCK)
                LDLM_LOCK_RELEASE(lock);
        else
                rc = err;

out:
        if (!req_passed_in && req != NULL) {
                ptlrpc_req_finished(req);
                if (reqp)
                        *reqp = NULL;
        }

        RETURN(rc);
}
EXPORT_SYMBOL(ldlm_cli_enqueue);
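
/*
 * A minimal synchronous usage sketch (hypothetical caller; the export,
 * resource id, and policy are assumed to come from the caller's context):
 *
 *      struct ldlm_enqueue_info einfo = {
 *              .ei_type  = LDLM_IBITS,
 *              .ei_mode  = LCK_PR,
 *              .ei_cb_bl = ldlm_blocking_ast,
 *              .ei_cb_cp = ldlm_completion_ast,
 *      };
 *      struct lustre_handle lockh = { 0 };
 *      __u64 flags = 0;
 *      int rc;
 *
 *      rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, &policy, &flags,
 *                            NULL, 0, LVB_T_NONE, &lockh, 0);
 *
 * With async == 0 and reqp == NULL the request is created, sent, and
 * finished internally; an async caller passes \a reqp and completes via
 * ldlm_cli_enqueue_fini() from its RPC interpret callback.
 */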
1140
1141 /**
1142  * Client-side IBITS lock convert.
1143  *
1144  * Inform server that lock has been converted instead of canceling.
1145  * Server finishes convert on own side and does reprocess to grant
1146  * all related waiting locks.
1147  *
1148  * Since convert means only ibits downgrading, client doesn't need to
1149  * wait for server reply to finish local converting process so this request
1150  * is made asynchronous.
1151  *
1152  */
1153 int ldlm_cli_convert_req(struct ldlm_lock *lock, __u32 *flags, __u64 new_bits)
1154 {
1155         struct ldlm_request *body;
1156         struct ptlrpc_request *req;
1157         struct obd_export *exp = lock->l_conn_export;
1158
1159         ENTRY;
1160
1161         LASSERT(exp != NULL);
1162
1163         /*
1164          * this is better to check earlier and it is done so already,
1165          * but this check is kept too as final one to issue an error
1166          * if any new code will miss such check.
1167          */
1168         if (!exp_connect_lock_convert(exp)) {
1169                 LDLM_ERROR(lock, "server doesn't support lock convert\n");
1170                 RETURN(-EPROTO);
1171         }
1172
1173         if (lock->l_resource->lr_type != LDLM_IBITS) {
1174                 LDLM_ERROR(lock, "convert works with IBITS locks only.");
1175                 RETURN(-EINVAL);
1176         }
1177
1178         LDLM_DEBUG(lock, "client-side convert");
1179
1180         req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
1181                                         &RQF_LDLM_CONVERT, LUSTRE_DLM_VERSION,
1182                                         LDLM_CONVERT);
1183         if (req == NULL)
1184                 RETURN(-ENOMEM);
1185
1186         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1187         body->lock_handle[0] = lock->l_remote_handle;
1188
1189         body->lock_desc.l_req_mode = lock->l_req_mode;
1190         body->lock_desc.l_granted_mode = lock->l_granted_mode;
1191
1192         body->lock_desc.l_policy_data.l_inodebits.bits = new_bits;
1193         body->lock_desc.l_policy_data.l_inodebits.cancel_bits = 0;
1194
1195         body->lock_flags = ldlm_flags_to_wire(*flags);
1196         body->lock_count = 1;
1197
1198         ptlrpc_request_set_replen(req);
1199
1200         /*
1201          * Use cancel portals for convert as well as high-priority handling.
1202          */
1203         req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
1204         req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
1205
1206         ptlrpc_at_set_req_timeout(req);
1207
1208         if (exp->exp_obd->obd_svc_stats != NULL)
1209                 lprocfs_counter_incr(exp->exp_obd->obd_svc_stats,
1210                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1211
1212         ptlrpcd_add_req(req);
1213         RETURN(0);
1214 }
1215
1216 /**
1217  * Cancel locks locally.
1218  * Returns:
1219  * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server
1220  * \retval LDLM_FL_CANCELING otherwise;
1221  * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC.
1222  */
1223 static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
1224 {
1225         __u64 rc = LDLM_FL_LOCAL_ONLY;
1226
1227         ENTRY;
1228
1229         if (lock->l_conn_export) {
1230                 bool local_only;
1231
1232                 LDLM_DEBUG(lock, "client-side cancel");
1233                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL_LOCAL,
1234                                  cfs_fail_val);
1235
1236                 /* Set this flag to prevent others from getting new references*/
1237                 lock_res_and_lock(lock);
1238                 ldlm_set_cbpending(lock);
1239                 local_only = !!(lock->l_flags &
1240                                 (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
1241                 ldlm_cancel_callback(lock);
1242                 rc = (ldlm_is_bl_ast(lock)) ?
1243                         LDLM_FL_BL_AST : LDLM_FL_CANCELING;
1244                 unlock_res_and_lock(lock);
1245
1246                 if (local_only) {
1247                         CDEBUG(D_DLMTRACE,
1248                                "not sending request (at caller's instruction)\n");
1249                         rc = LDLM_FL_LOCAL_ONLY;
1250                 }
1251                 ldlm_lock_cancel(lock);
1252         } else {
1253                 if (ns_is_client(ldlm_lock_to_ns(lock))) {
1254                         LDLM_ERROR(lock, "Trying to cancel local lock");
1255                         LBUG();
1256                 }
1257                 LDLM_DEBUG(lock, "server-side local cancel");
1258                 ldlm_lock_cancel(lock);
1259                 ldlm_reprocess_all(lock->l_resource, lock);
1260         }
1261
1262         RETURN(rc);
1263 }
1264
1265 /**
1266  * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
1267  */
1268 static void ldlm_cancel_pack(struct ptlrpc_request *req,
1269                              struct list_head *head, int count)
1270 {
1271         struct ldlm_request *dlm;
1272         struct ldlm_lock *lock;
1273         int max, packed = 0;
1274
1275         ENTRY;
1276
1277         dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1278         LASSERT(dlm != NULL);
1279
1280         /* Check the room in the request buffer. */
1281         max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
1282                 sizeof(struct ldlm_request);
1283         max /= sizeof(struct lustre_handle);
1284         max += LDLM_LOCKREQ_HANDLES;
1285         LASSERT(max >= dlm->lock_count + count);
1286
1287         /*
1288          * XXX: it would be better to pack lock handles grouped by resource.
1289          * so that the server cancel would call filter_lvbo_update() less
1290          * frequently.
1291          */
1292         list_for_each_entry(lock, head, l_bl_ast) {
1293                 if (!count--)
1294                         break;
1295                 LASSERT(lock->l_conn_export);
1296                 /* Pack the lock handle to the given request buffer. */
1297                 LDLM_DEBUG(lock, "packing");
1298                 dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
1299                 packed++;
1300         }
1301         CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
1302         EXIT;
1303 }
1304
1305 /**
1306  * Prepare and send a batched cancel RPC. It will include \a count lock
1307  * handles of locks given in \a cancels list.
1308  */
1309 int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
1310                         int count, enum ldlm_cancel_flags flags)
1311 {
1312         struct ptlrpc_request *req = NULL;
1313         struct obd_import *imp;
1314         int free, sent = 0;
1315         int rc = 0;
1316
1317         ENTRY;
1318
1319         LASSERT(exp != NULL);
1320         LASSERT(count > 0);
1321
1322         CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
1323
1324         if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
1325                 RETURN(count);
1326
1327         free = ldlm_format_handles_avail(class_exp2cliimp(exp),
1328                                          &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
1329         if (count > free)
1330                 count = free;
1331
1332         while (1) {
1333                 imp = class_exp2cliimp(exp);
1334                 if (imp == NULL || imp->imp_invalid) {
1335                         CDEBUG(D_DLMTRACE,
1336                                "skipping cancel on invalid import %p\n", imp);
1337                         RETURN(count);
1338                 }
1339
1340                 req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
1341                 if (req == NULL)
1342                         GOTO(out, rc = -ENOMEM);
1343
1344                 req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
1345                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
1346                                      ldlm_request_bufsize(count, LDLM_CANCEL));
1347
1348                 rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
1349                 if (rc) {
1350                         ptlrpc_request_free(req);
1351                         GOTO(out, rc);
1352                 }
1353
1354                 /*
1355                  * If OSP want cancel cross-MDT lock, let's not block it in
1356                  * in recovery, otherwise the lock will not released, if
1357                  * the remote target is also in recovery, and it also need
1358                  * this lock, it might cause deadlock.
1359                  */
1360                 if (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS &&
1361                     exp->exp_obd->obd_lu_dev != NULL &&
1362                     exp->exp_obd->obd_lu_dev->ld_site != NULL) {
1363                         struct lu_device *top_dev;
1364
1365                         top_dev = exp->exp_obd->obd_lu_dev->ld_site->ls_top_dev;
1366                         if (top_dev != NULL &&
1367                             top_dev->ld_obd->obd_recovering)
1368                                 req->rq_allow_replay = 1;
1369                 }
1370
1371                 req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
1372                 req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
1373                 ptlrpc_at_set_req_timeout(req);
1374
1375                 ldlm_cancel_pack(req, cancels, count);
1376
1377                 ptlrpc_request_set_replen(req);
1378                 if (flags & LCF_ASYNC) {
1379                         ptlrpcd_add_req(req);
1380                         sent = count;
1381                         GOTO(out, 0);
1382                 }
1383
1384                 rc = ptlrpc_queue_wait(req);
1385                 if (rc == LUSTRE_ESTALE) {
1386                         CDEBUG(D_DLMTRACE,
1387                                "client/server (nid %s) out of sync -- not fatal\n",
1388                                libcfs_nid2str(req->rq_import->imp_connection->c_peer.nid));
1389                         rc = 0;
1390                 } else if (rc == -ETIMEDOUT && /* check there was no reconnect*/
1391                            req->rq_import_generation == imp->imp_generation) {
1392                         ptlrpc_req_finished(req);
1393                         continue;
1394                 } else if (rc != ELDLM_OK) {
1395                         /* -ESHUTDOWN is common on umount */
1396                         CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
1397                                      "Got rc %d from cancel RPC: canceling anyway\n",
1398                                      rc);
1399                         break;
1400                 }
1401                 sent = count;
1402                 break;
1403         }
1404
1405         ptlrpc_req_finished(req);
1406         EXIT;
1407 out:
1408         return sent ? sent : rc;
1409 }
1410
1411 static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
1412 {
1413         LASSERT(imp != NULL);
1414         return &imp->imp_obd->obd_namespace->ns_pool;
1415 }
1416
1417 /**
1418  * Update client's OBD pool related fields with new SLV and Limit from \a req.
1419  */
1420 int ldlm_cli_update_pool(struct ptlrpc_request *req)
1421 {
1422         struct obd_device *obd;
1423         __u64 new_slv;
1424         __u32 new_limit;
1425
1426         ENTRY;
1427         if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
1428                      !imp_connect_lru_resize(req->rq_import)))
1429                 /* Do nothing for corner cases. */
1430                 RETURN(0);
1431
1432         /*
1433          * In some cases the RPC may contain SLV and limit zeroed out. This
1434          * is the case when the server does not support the LRU resize
1435          * feature. It is also possible in some recovery cases, when
1436          * server-side reqs have no reference to the OBD export and thus
1437          * access to the server-side namespace is not possible.
1438          */
1439         if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
1440             lustre_msg_get_limit(req->rq_repmsg) == 0) {
1441                 DEBUG_REQ(D_HA, req,
1442                           "Zero SLV or limit found (SLV=%llu, limit=%u)",
1443                           lustre_msg_get_slv(req->rq_repmsg),
1444                           lustre_msg_get_limit(req->rq_repmsg));
1445                 RETURN(0);
1446         }
1447
1448         new_limit = lustre_msg_get_limit(req->rq_repmsg);
1449         new_slv = lustre_msg_get_slv(req->rq_repmsg);
1450         obd = req->rq_import->imp_obd;
1451
1452         read_lock(&obd->obd_pool_lock);
1453         if (obd->obd_pool_slv == new_slv &&
1454             obd->obd_pool_limit == new_limit) {
1455                 read_unlock(&obd->obd_pool_lock);
1456                 RETURN(0);
1457         }
1458         read_unlock(&obd->obd_pool_lock);
1459
1460         /*
1461          * Set new SLV and limit in OBD fields to make them accessible
1462          * to the pool thread. We do not access obd_namespace and pool
1463          * directly here as there is no reliable way to make sure that
1464          * they are still alive at cleanup time. Evil races are possible
1465          * which may cause Oops at that time.
1466          */
1467         write_lock(&obd->obd_pool_lock);
1468         obd->obd_pool_slv = new_slv;
1469         obd->obd_pool_limit = new_limit;
1470         write_unlock(&obd->obd_pool_lock);
1471
1472         RETURN(0);
1473 }
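
/*
 * Illustrative sketch, not part of the original file: a reader of the
 * fields published by ldlm_cli_update_pool() takes the same lock. The
 * helper name is hypothetical; only obd_pool_lock, obd_pool_slv and
 * obd_pool_limit come from the code above.
 *
 *	static void example_read_pool_fields(struct obd_device *obd,
 *					     __u64 *slv, __u32 *limit)
 *	{
 *		read_lock(&obd->obd_pool_lock);
 *		*slv = obd->obd_pool_slv;
 *		*limit = obd->obd_pool_limit;
 *		read_unlock(&obd->obd_pool_lock);
 *	}
 */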
1474
1475 int ldlm_cli_convert(struct ldlm_lock *lock,
1476                      enum ldlm_cancel_flags cancel_flags)
1477 {
1478         int rc = -EINVAL;
1479
1480         LASSERT(!lock->l_readers && !lock->l_writers);
1481         LDLM_DEBUG(lock, "client lock convert START");
1482
1483         if (lock->l_resource->lr_type == LDLM_IBITS) {
1484                 lock_res_and_lock(lock);
1485                 do {
1486                         rc = ldlm_cli_inodebits_convert(lock, cancel_flags);
1487                 } while (rc == -EAGAIN);
1488                 unlock_res_and_lock(lock);
1489         }
1490
1491         LDLM_DEBUG(lock, "client lock convert END");
1492         RETURN(rc);
1493 }
1494 EXPORT_SYMBOL(ldlm_cli_convert);
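
/*
 * Illustrative usage sketch (hypothetical caller): 'lock' is assumed to
 * be an IBITS lock whose last reader/writer reference was just dropped,
 * and 'lockh' its handle. If the convert fails, a caller would fall
 * back to a full cancel:
 *
 *	rc = ldlm_cli_convert(lock, 0);
 *	if (rc != 0)
 *		rc = ldlm_cli_cancel(&lockh, 0);
 */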
1495
1496 /**
1497  * Client side lock cancel.
1498  *
1499  * Lock must not have any readers or writers by this time.
1500  */
1501 int ldlm_cli_cancel(const struct lustre_handle *lockh,
1502                     enum ldlm_cancel_flags cancel_flags)
1503 {
1504         struct obd_export *exp;
1505         enum ldlm_lru_flags lru_flags;
1506         int avail, count = 1;
1507         __u64 rc = 0;
1508         struct ldlm_namespace *ns;
1509         struct ldlm_lock *lock;
1510         LIST_HEAD(cancels);
1511
1512         ENTRY;
1513
1514         lock = ldlm_handle2lock_long(lockh, 0);
1515         if (lock == NULL) {
1516                 LDLM_DEBUG_NOLOCK("lock is already being destroyed");
1517                 RETURN(0);
1518         }
1519
1520         lock_res_and_lock(lock);
1521         LASSERT(!ldlm_is_converting(lock));
1522
1523         /* Lock is already being canceled; wait for it unless the caller asked for async */
1524         if (ldlm_is_canceling(lock)) {
1525                 if (cancel_flags & LCF_ASYNC) {
1526                         unlock_res_and_lock(lock);
1527                 } else {
1528                         unlock_res_and_lock(lock);
1529                         wait_event_idle(lock->l_waitq, is_bl_done(lock));
1530                 }
1531                 LDLM_LOCK_RELEASE(lock);
1532                 RETURN(0);
1533         }
1534
1535         ldlm_set_canceling(lock);
1536         unlock_res_and_lock(lock);
1537
1538         if (cancel_flags & LCF_LOCAL)
1539                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_LOCAL_CANCEL_PAUSE,
1540                                  cfs_fail_val);
1541
1542         rc = ldlm_cli_cancel_local(lock);
1543         if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
1544                 LDLM_LOCK_RELEASE(lock);
1545                 RETURN(0);
1546         }
1547         /*
1548          * Even if the lock is marked as LDLM_FL_BL_AST, this is an LDLM_CANCEL
1549          * RPC, which goes to the canceld portal, so we can cancel other LRU
1550          * locks here and send them all as one LDLM_CANCEL RPC.
1551          */
1552         LASSERT(list_empty(&lock->l_bl_ast));
1553         list_add(&lock->l_bl_ast, &cancels);
1554
1555         exp = lock->l_conn_export;
1556         if (exp_connect_cancelset(exp)) {
1557                 avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
1558                                                   &RQF_LDLM_CANCEL,
1559                                                   RCL_CLIENT, 0);
1560                 LASSERT(avail > 0);
1561
1562                 ns = ldlm_lock_to_ns(lock);
1563                 lru_flags = ns_connect_lru_resize(ns) ?
1564                         LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
1565                 count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
1566                                                LCF_BL_AST, lru_flags);
1567         }
1568         ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
1569         RETURN(0);
1570 }
1571 EXPORT_SYMBOL(ldlm_cli_cancel);
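
/*
 * Illustrative usage sketch (hypothetical variables): cancel a lock by
 * handle once it has no more readers or writers, without waiting for
 * the cancel RPC to complete:
 *
 *	struct lustre_handle lockh;
 *
 *	ldlm_lock2handle(lock, &lockh);
 *	rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 */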
1572
1573 /**
1574  * Locally cancel up to \a count locks in list \a cancels.
1575  * Return the number of cancelled locks.
1576  */
1577 int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
1578                                enum ldlm_cancel_flags cancel_flags)
1579 {
1580         LIST_HEAD(head);
1581         struct ldlm_lock *lock, *next;
1582         int left = 0, bl_ast = 0;
1583         __u64 rc;
1584
1585         left = count;
1586         list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
1587                 if (left-- == 0)
1588                         break;
1589
1590                 if (cancel_flags & LCF_LOCAL) {
1591                         rc = LDLM_FL_LOCAL_ONLY;
1592                         ldlm_lock_cancel(lock);
1593                 } else {
1594                         rc = ldlm_cli_cancel_local(lock);
1595                 }
1596                 /*
1597                  * Until we have compound requests and can send LDLM_CANCEL
1598                  * requests batched with generic RPCs, we need to send cancels
1599                  * with the LDLM_FL_BL_AST flag in a separate RPC from
1600                  * the one being generated now.
1601                  */
1602                 if (!(cancel_flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
1603                         LDLM_DEBUG(lock, "Cancel lock separately");
1604                         list_move(&lock->l_bl_ast, &head);
1605                         bl_ast++;
1606                         continue;
1607                 }
1608                 if (rc == LDLM_FL_LOCAL_ONLY) {
1609                         /* CANCEL RPC should not be sent to server. */
1610                         list_del_init(&lock->l_bl_ast);
1611                         LDLM_LOCK_RELEASE(lock);
1612                         count--;
1613                 }
1614         }
1615         if (bl_ast > 0) {
1616                 count -= bl_ast;
1617                 ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
1618         }
1619
1620         RETURN(count);
1621 }
1622
1623 /**
1624  * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
1625  * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
1626  * readahead requests, ...)
1627  */
1628 static enum ldlm_policy_res
1629 ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1630                            int unused, int added, int count)
1631 {
1632         enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
1633
1634         /*
1635          * Don't check added & count, since we want to process all locks
1636          * from the unused list.
1637          * It's fine not to take a lock to access lock->l_resource, since
1638          * the lock has already been granted, so it won't change.
1639          */
1640         switch (lock->l_resource->lr_type) {
1641                 case LDLM_EXTENT:
1642                 case LDLM_IBITS:
1643                         if (ns->ns_cancel != NULL && ns->ns_cancel(lock) != 0)
1644                                 break;
1645                         /* fallthrough */
1646                 default:
1647                         result = LDLM_POLICY_SKIP_LOCK;
1648                         break;
1649         }
1650
1651         RETURN(result);
1652 }
1653
1654 /**
1655  * Callback function for LRU-resize policy. Decides whether to keep
1656  * \a lock in LRU for the current LRU size \a unused, the number added
1657  * in the current scan \a added, and the preferred cancel count \a count.
1658  *
1659  * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
1660  *
1661  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1662  */
1663 static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1664                                                     struct ldlm_lock *lock,
1665                                                     int unused, int added,
1666                                                     int count)
1667 {
1668         ktime_t cur = ktime_get();
1669         struct ldlm_pool *pl = &ns->ns_pool;
1670         u64 slv, lvf, lv;
1671         s64 la;
1672
1673         /*
1674          * Stop LRU processing when we reach past @count or have checked all
1675          * locks in LRU.
1676          */
1677         if (count && added >= count)
1678                 return LDLM_POLICY_KEEP_LOCK;
1679
1680         /*
1681          * Regardless of the LV, it doesn't make sense to keep a lock
1682          * that has been unused for ns_max_age time.
1683          */
1684         if (ktime_after(ktime_get(),
1685                         ktime_add(lock->l_last_used, ns->ns_max_age)))
1686                 return LDLM_POLICY_CANCEL_LOCK;
1687
1688         slv = ldlm_pool_get_slv(pl);
1689         lvf = ldlm_pool_get_lvf(pl);
1690         la = div_u64(ktime_to_ns(ktime_sub(cur, lock->l_last_used)),
1691                      NSEC_PER_SEC);
1692         lv = lvf * la * unused;
1693
1694         /* Inform pool about current CLV to see it via debugfs. */
1695         ldlm_pool_set_clv(pl, lv);
1696
1697         /*
1698          * Keep the lock while no SLV has come from the server yet
1699          * (slv == 0) or while lv is still below it.
1700          */
1701         if (slv == 0 || lv < slv)
1702                 return LDLM_POLICY_KEEP_LOCK;
1703
1704         return LDLM_POLICY_CANCEL_LOCK;
1705 }
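
/*
 * Worked example with illustrative numbers: if lvf = 100, the lock was
 * last used la = 30 seconds ago and unused = 2000 locks sit in the LRU,
 * then lv = 100 * 30 * 2000 = 6,000,000. With a server SLV of 5,000,000
 * we have lv >= slv, so the lock is canceled; with slv == 0 (no SLV
 * received from the server yet) it would be kept.
 */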
1706
1707 static enum ldlm_policy_res
1708 ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
1709                                 struct ldlm_lock *lock,
1710                                 int unused, int added,
1711                                 int count)
1712 {
1713         enum ldlm_policy_res result;
1714
1715         result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
1716         if (result == LDLM_POLICY_KEEP_LOCK)
1717                 return result;
1718
1719         return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
1720 }
1721
1722 /**
1723  * Callback function for the policy set via debugfs. Decides whether to
1724  * keep \a lock in LRU for the current LRU size \a unused, the number
1725  * added in the current scan \a added, and the preferred cancel count \a count.
1726  *
1727  * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
1728  *
1729  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1730  */
1731 static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
1732                                                       struct ldlm_lock *lock,
1733                                                       int unused, int added,
1734                                                       int count)
1735 {
1736         /*
1737          * Stop LRU processing when we reach past @count or have checked all
1738          * locks in LRU.
1739          */
1740         return (added >= count) ?
1741                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
1742 }
1743
1744 /**
1745  * Callback function for aged policy. Decides whether to keep \a lock
1746  * in LRU for the current LRU size \a unused, the number added in the
1747  * current scan \a added, and the preferred cancel count \a count.
1748  *
1749  * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
1750  *
1751  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1752  */
1753 static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
1754                                                     struct ldlm_lock *lock,
1755                                                     int unused, int added,
1756                                                     int count)
1757 {
1758         if ((added >= count) &&
1759             ktime_before(ktime_get(),
1760                          ktime_add(lock->l_last_used, ns->ns_max_age)))
1761                 return LDLM_POLICY_KEEP_LOCK;
1762
1763         return LDLM_POLICY_CANCEL_LOCK;
1764 }
1765
1766 static enum ldlm_policy_res
1767 ldlm_cancel_aged_no_wait_policy(struct ldlm_namespace *ns,
1768                                 struct ldlm_lock *lock,
1769                                 int unused, int added, int count)
1770 {
1771         enum ldlm_policy_res result;
1772
1773         result = ldlm_cancel_aged_policy(ns, lock, unused, added, count);
1774         if (result == LDLM_POLICY_KEEP_LOCK)
1775                 return result;
1776
1777         return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
1778 }
1779
1780 /**
1781  * Callback function for default policy. Decides whether to keep \a lock
1782  * in LRU for the current LRU size \a unused, the number added in the
1783  * current scan \a added, and the preferred cancel count \a count.
1784  *
1785  * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
1786  *
1787  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1788  */
1789 static
1790 enum ldlm_policy_res ldlm_cancel_default_policy(struct ldlm_namespace *ns,
1791                                                 struct ldlm_lock *lock,
1792                                                 int unused, int added,
1793                                                 int count)
1794 {
1795         /*
1796          * Stop LRU processing when we reach past count or have checked all
1797          * locks in LRU.
1798          */
1799         return (added >= count) ?
1800                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
1801 }
1802
1803 typedef enum ldlm_policy_res
1804 (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1805                             int unused, int added, int count);
1806
1807 static ldlm_cancel_lru_policy_t
1808 ldlm_cancel_lru_policy(struct ldlm_namespace *ns, enum ldlm_lru_flags lru_flags)
1809 {
1810         if (ns_connect_lru_resize(ns)) {
1811                 if (lru_flags & LDLM_LRU_FLAG_SHRINK)
1812                         /* We kill passed number of old locks. */
1813                         return ldlm_cancel_passed_policy;
1814                 if (lru_flags & LDLM_LRU_FLAG_LRUR) {
1815                         if (lru_flags & LDLM_LRU_FLAG_NO_WAIT)
1816                                 return ldlm_cancel_lrur_no_wait_policy;
1817                         else
1818                                 return ldlm_cancel_lrur_policy;
1819                 }
1820                 if (lru_flags & LDLM_LRU_FLAG_PASSED)
1821                         return ldlm_cancel_passed_policy;
1822         } else {
1823                 if (lru_flags & LDLM_LRU_FLAG_AGED) {
1824                         if (lru_flags & LDLM_LRU_FLAG_NO_WAIT)
1825                                 return ldlm_cancel_aged_no_wait_policy;
1826                         else
1827                                 return ldlm_cancel_aged_policy;
1828                 }
1829         }
1830         if (lru_flags & LDLM_LRU_FLAG_NO_WAIT)
1831                 return ldlm_cancel_no_wait_policy;
1832
1833         return ldlm_cancel_default_policy;
1834 }
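
/*
 * Illustrative sketch of how the selector is used (see
 * ldlm_prepare_lru_list() below for the real caller; the local
 * variables here are hypothetical). KEEP_LOCK stops the LRU scan:
 *
 *	ldlm_cancel_lru_policy_t pf;
 *	enum ldlm_policy_res result;
 *
 *	pf = ldlm_cancel_lru_policy(ns, LDLM_LRU_FLAG_LRUR);
 *	result = pf(ns, lock, ns->ns_nr_unused, added, count);
 *	if (result == LDLM_POLICY_KEEP_LOCK)
 *		break;
 */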
1835
1836 /**
1837  * - Free space in LRU for \a count new locks,
1838  *   redundant unused locks are canceled locally;
1839  * - also cancel locally unused aged locks;
1840  * - do not cancel more than \a max locks;
1841  * - GET the found locks and add them into the \a cancels list.
1842  *
1843  * A client lock can be added to the l_bl_ast list only when it is
1844  * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
1845  * CANCEL.  There are the following use cases:
1846  * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
1847  * ldlm_cli_cancel(), which check and set this flag properly. As any
1848  * attempt to cancel a lock relies on this flag, the l_bl_ast list is
1849  * accessed later without any special locking.
1850  *
1851  * Calling policies for enabled LRU resize:
1852  * ----------------------------------------
1853  * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
1854  *                              cancel not more than \a count locks;
1855  *
1856  * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located
1857  *                              at the beginning of LRU list);
1858  *
1859  * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according
1860  *                              to the memory pressure policy function;
1861  *
1862  * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy"
1863  *
1864  * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
1865  *                              (typically before replaying locks) w/o
1866  *                              sending any RPCs or waiting for any
1867  *                              outstanding RPC to complete.
1868  *
1869  * flags & LDLM_LRU_FLAG_CLEANUP - when cancelling read locks, do not check for
1870  *                              other read locks covering the same pages, just
1871  *                              discard those pages.
1872  */
1873 static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1874                                  struct list_head *cancels, int count, int max,
1875                                  enum ldlm_lru_flags lru_flags)
1876 {
1877         ldlm_cancel_lru_policy_t pf;
1878         int added = 0;
1879         int no_wait = lru_flags & LDLM_LRU_FLAG_NO_WAIT;
1880
1881         ENTRY;
1882
1883         if (!ns_connect_lru_resize(ns))
1884                 count += ns->ns_nr_unused - ns->ns_max_unused;
1885
1886         pf = ldlm_cancel_lru_policy(ns, lru_flags);
1887         LASSERT(pf != NULL);
1888
1889         /* For any flags, stop scanning if @max is reached. */
1890         while (!list_empty(&ns->ns_unused_list) && (max == 0 || added < max)) {
1891                 struct ldlm_lock *lock;
1892                 struct list_head *item, *next;
1893                 enum ldlm_policy_res result;
1894                 ktime_t last_use = ktime_set(0, 0);
1895
1896                 spin_lock(&ns->ns_lock);
1897                 item = no_wait ? ns->ns_last_pos : &ns->ns_unused_list;
1898                 for (item = item->next, next = item->next;
1899                      item != &ns->ns_unused_list;
1900                      item = next, next = item->next) {
1901                         lock = list_entry(item, struct ldlm_lock, l_lru);
1902
1903                         /* No locks which got blocking requests. */
1904                         LASSERT(!ldlm_is_bl_ast(lock));
1905
1906                         if (!ldlm_is_canceling(lock))
1907                                 break;
1908
1909                         /*
1910                          * Somebody is already doing CANCEL. No need for this
1911                          * lock in LRU, do not traverse it again.
1912                          */
1913                         ldlm_lock_remove_from_lru_nolock(lock);
1914                 }
1915                 if (item == &ns->ns_unused_list) {
1916                         spin_unlock(&ns->ns_lock);
1917                         break;
1918                 }
1919
1920                 last_use = lock->l_last_used;
1921
1922                 LDLM_LOCK_GET(lock);
1923                 spin_unlock(&ns->ns_lock);
1924                 lu_ref_add(&lock->l_reference, __func__, current);
1925
1926                 /*
1927                  * Pass the lock through the policy filter and see if it
1928                  * should stay in LRU.
1929                  *
1930                  * Even for shrinker policy we stop scanning if
1931                  * we find a lock that should stay in the cache.
1932                  * We should take into account lock age anyway
1933                  * as a new lock is a valuable resource even if
1934                  * it has a low weight.
1935                  *
1936                  * That is, for shrinker policy we drop only
1937                  * old locks, but additionally choose them by
1938                  * their weight. Big extent locks will stay in
1939                  * the cache.
1940                  */
1941                 result = pf(ns, lock, ns->ns_nr_unused, added, count);
1942                 if (result == LDLM_POLICY_KEEP_LOCK) {
1943                         lu_ref_del(&lock->l_reference, __func__, current);
1944                         LDLM_LOCK_RELEASE(lock);
1945                         break;
1946                 }
1947
1948                 if (result == LDLM_POLICY_SKIP_LOCK) {
1949                         lu_ref_del(&lock->l_reference, __func__, current);
1950                         if (no_wait) {
1951                                 spin_lock(&ns->ns_lock);
1952                                 if (!list_empty(&lock->l_lru) &&
1953                                     lock->l_lru.prev == ns->ns_last_pos)
1954                                         ns->ns_last_pos = &lock->l_lru;
1955                                 spin_unlock(&ns->ns_lock);
1956                         }
1957
1958                         LDLM_LOCK_RELEASE(lock);
1959                         continue;
1960                 }
1961
1962                 lock_res_and_lock(lock);
1963                 /* Check flags again under the lock. */
1964                 if (ldlm_is_canceling(lock) ||
1965                     ldlm_lock_remove_from_lru_check(lock, last_use) == 0) {
1966                         /*
1967                          * Another thread is removing lock from LRU, or
1968                          * somebody is already doing CANCEL, or there
1969                          * is a blocking request which will send cancel
1970                          * by itself, or the lock is no longer unused or
1971                          * the lock has been used since the pf() call and
1972                          * pages could be put under it.
1973                          */
1974                         unlock_res_and_lock(lock);
1975                         lu_ref_del(&lock->l_reference, __func__, current);
1976                         LDLM_LOCK_RELEASE(lock);
1977                         continue;
1978                 }
1979                 LASSERT(!lock->l_readers && !lock->l_writers);
1980
1981                 /*
1982                  * If we have chosen to cancel this lock voluntarily, we had
1983                  * better send a cancel notification to the server so that it
1984                  * frees the appropriate state. This might lead to a race where,
1985                  * while we are doing the cancel here, the server is also
1986                  * silently cancelling this lock.
1987                  */
1988                 ldlm_clear_cancel_on_block(lock);
1989
1990                 /*
1991                  * Setting the CBPENDING flag is a little misleading,
1992                  * but prevents an important race; namely, once
1993                  * CBPENDING is set, the lock can accumulate no more
1994                  * readers/writers. Since readers and writers are
1995                  * already zero here, ldlm_lock_decref() won't see
1996                  * this flag and call l_blocking_ast
1997                  */
1998                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
1999
2000                 if ((lru_flags & LDLM_LRU_FLAG_CLEANUP) &&
2001                     (lock->l_resource->lr_type == LDLM_EXTENT ||
2002                      ldlm_has_dom(lock)) && lock->l_granted_mode == LCK_PR)
2003                         ldlm_set_discard_data(lock);
2004
2005                 /*
2006                  * We can't re-add to l_lru as it confuses the
2007                  * refcounting in ldlm_lock_remove_from_lru() if an AST
2008                  * arrives after we drop lr_lock below. We use l_bl_ast
2009                  * and can't use l_pending_chain, which is used on both the
2010                  * server and the client, even though b=5666 says it is used
2011                  * only on the server.
2012                  */
2013                 LASSERT(list_empty(&lock->l_bl_ast));
2014                 list_add(&lock->l_bl_ast, cancels);
2015                 unlock_res_and_lock(lock);
2016                 lu_ref_del(&lock->l_reference, __func__, current);
2017                 added++;
2018         }
2019         RETURN(added);
2020 }
2021
2022 int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
2023                           int count, int max,
2024                           enum ldlm_cancel_flags cancel_flags,
2025                           enum ldlm_lru_flags lru_flags)
2026 {
2027         int added;
2028
2029         added = ldlm_prepare_lru_list(ns, cancels, count, max, lru_flags);
2030         if (added <= 0)
2031                 return added;
2032
2033         return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
2034 }
2035
2036 /**
2037  * Cancel at least \a nr locks from given namespace LRU.
2038  *
2039  * When called with LCF_ASYNC the blocking callback will be handled
2040  * in a thread and this function will return after the thread has been
2041  * asked to call the callback.  When called without LCF_ASYNC the
2042  * blocking callback will be performed in this function.
2043  */
2044 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
2045                     enum ldlm_cancel_flags cancel_flags,
2046                     enum ldlm_lru_flags lru_flags)
2047 {
2048         LIST_HEAD(cancels);
2049         int count, rc;
2050
2051         ENTRY;
2052
2053         /*
2054          * Just prepare the list of locks, do not actually cancel them yet.
2055          * Locks are cancelled later in a separate thread.
2056          */
2057         count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, lru_flags);
2058         rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
2059         if (rc == 0)
2060                 RETURN(count);
2061
2062         RETURN(0);
2063 }
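
/*
 * Illustrative usage sketch: under memory pressure a client could ask
 * the namespace to shed one lock, with blocking callbacks handled
 * asynchronously ('ns' is assumed to be a client namespace):
 *
 *	ldlm_cancel_lru(ns, 1, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
 */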
2064
2065 /**
2066  * Find and cancel locally unused locks found on resource, matched to the
2067  * given policy, mode. GET the found locks and add them into the \a cancels
2068  * list.
2069  */
2070 int ldlm_cancel_resource_local(struct ldlm_resource *res,
2071                                struct list_head *cancels,
2072                                union ldlm_policy_data *policy,
2073                                enum ldlm_mode mode, __u64 lock_flags,
2074                                enum ldlm_cancel_flags cancel_flags,
2075                                void *opaque)
2076 {
2077         struct ldlm_lock *lock;
2078         int count = 0;
2079
2080         ENTRY;
2081
2082         lock_res(res);
2083         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
2084                 if (opaque != NULL && lock->l_ast_data != opaque) {
2085                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
2086                                    lock->l_ast_data, opaque);
2087                         continue;
2088                 }
2089
2090                 if (lock->l_readers || lock->l_writers)
2091                         continue;
2092
2093                 /*
2094                  * If somebody is already doing CANCEL, or blocking AST came
2095                  * then skip this lock.
2096                  */
2097                 if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
2098                         continue;
2099
2100                 if (lockmode_compat(lock->l_granted_mode, mode))
2101                         continue;
2102
2103                 /*
2104                  * If policy is given and this is IBITS lock, add to list only
2105                  * those locks that match by policy.
2106                  */
2107                 if (policy && (lock->l_resource->lr_type == LDLM_IBITS)) {
2108                         if (!(lock->l_policy_data.l_inodebits.bits &
2109                               policy->l_inodebits.bits))
2110                                 continue;
2111                         /* Skip locks with the DoM bit if it is not set in the
2112                          * policy, so data is not flushed via side-bits. Lock
2113                          * convert will drop those bits separately.
2114                          */
2115                         if (ldlm_has_dom(lock) &&
2116                             !(policy->l_inodebits.bits & MDS_INODELOCK_DOM))
2117                                 continue;
2118                 }
2119
2120                 /* See CBPENDING comment in ldlm_cancel_lru */
2121                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
2122                                  lock_flags;
2123                 LASSERT(list_empty(&lock->l_bl_ast));
2124                 list_add(&lock->l_bl_ast, cancels);
2125                 LDLM_LOCK_GET(lock);
2126                 count++;
2127         }
2128         unlock_res(res);
2129
2130         RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
2131 }
2132 EXPORT_SYMBOL(ldlm_cancel_resource_local);
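
/*
 * Illustrative sketch, not part of the original file: cancel unused
 * IBITS locks on a resource that conflict with an EX request on the
 * UPDATE bit, then ship the remaining cancels to the server. 'res' is
 * assumed to be a referenced resource; the pattern mirrors
 * ldlm_cli_cancel_unused_resource() below.
 *
 *	union ldlm_policy_data policy = {
 *		.l_inodebits = { .bits = MDS_INODELOCK_UPDATE },
 *	};
 *	LIST_HEAD(cancels);
 *	int count;
 *
 *	count = ldlm_cancel_resource_local(res, &cancels, &policy, LCK_EX,
 *					   0, LCF_BL_AST, NULL);
 *	ldlm_cli_cancel_list(&cancels, count, NULL, 0);
 */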
2133
2134 /**
2135  * Cancel client-side locks from a list and send/prepare cancel RPCs to the
2136  * server.
2137  * If \a req is NULL, send CANCEL request to server with handles of locks
2138  * in the \a cancels. If EARLY_CANCEL is not supported, send CANCEL requests
2139  * separately per lock.
2140  * If \a req is not NULL, put handles of locks in \a cancels into the request
2141  * buffer at the offset \a off.
2142  * Destroy \a cancels at the end.
2143  */
2144 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
2145                          struct ptlrpc_request *req,
2146                          enum ldlm_cancel_flags flags)
2147 {
2148         struct ldlm_lock *lock;
2149         int res = 0;
2150
2151         ENTRY;
2152
2153         if (list_empty(cancels) || count == 0)
2154                 RETURN(0);
2155
2156         /*
2157          * XXX: requests (both batched and not) could be sent in parallel.
2158          * Usually it is enough to have just 1 RPC, but it is possible that
2159          * there are too many locks to be cancelled in LRU or on a resource.
2160          * It would also speed up the case when the server does not support
2161          * the feature.
2162          */
2163         while (count > 0) {
2164                 LASSERT(!list_empty(cancels));
2165                 lock = list_entry(cancels->next, struct ldlm_lock,
2166                                   l_bl_ast);
2167                 LASSERT(lock->l_conn_export);
2168
2169                 if (exp_connect_cancelset(lock->l_conn_export)) {
2170                         res = count;
2171                         if (req)
2172                                 ldlm_cancel_pack(req, cancels, count);
2173                         else
2174                                 res = ldlm_cli_cancel_req(lock->l_conn_export,
2175                                                           cancels, count,
2176                                                           flags);
2177                 } else {
2178                         res = ldlm_cli_cancel_req(lock->l_conn_export,
2179                                                   cancels, 1, flags);
2180                 }
2181
2182                 if (res < 0) {
2183                         CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
2184                                      "ldlm_cli_cancel_list: %d\n", res);
2185                         res = count;
2186                 }
2187
2188                 count -= res;
2189                 ldlm_lock_list_put(cancels, l_bl_ast, res);
2190         }
2191         LASSERT(count == 0);
2192         RETURN(0);
2193 }
2194 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2195
2196 /**
2197  * Cancel all locks on a resource that have 0 readers/writers.
2198  *
2199  * If flags & LCF_LOCAL, throw the locks away without trying
2200  * to notify the server.
2201  */
2202 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
2203                                     const struct ldlm_res_id *res_id,
2204                                     union ldlm_policy_data *policy,
2205                                     enum ldlm_mode mode,
2206                                     enum ldlm_cancel_flags flags, void *opaque)
2207 {
2208         struct ldlm_resource *res;
2209         LIST_HEAD(cancels);
2210         int count;
2211         int rc;
2212
2213         ENTRY;
2214
2215         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
2216         if (IS_ERR(res)) {
2217                 /* This is not a problem. */
2218                 CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
2219                 RETURN(0);
2220         }
2221
2222         LDLM_RESOURCE_ADDREF(res);
2223         count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
2224                                            0, flags | LCF_BL_AST, opaque);
2225         rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
2226         if (rc != ELDLM_OK)
2227                 CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
2228                        PLDLMRES(res), rc);
2229
2230         LDLM_RESOURCE_DELREF(res);
2231         ldlm_resource_putref(res);
2232         RETURN(0);
2233 }
2234 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2235
2236 struct ldlm_cli_cancel_arg {
2237         int     lc_flags;
2238         void   *lc_opaque;
2239 };
2240
2241 static int
2242 ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2243                             struct hlist_node *hnode, void *arg)
2244 {
2245         struct ldlm_resource           *res = cfs_hash_object(hs, hnode);
2246         struct ldlm_cli_cancel_arg     *lc = arg;
2247
2248         ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
2249                                         NULL, LCK_MINMODE, lc->lc_flags,
2250                                         lc->lc_opaque);
2251         /* must return 0 for hash iteration */
2252         return 0;
2253 }
2254
2255 /**
2256  * Cancel all locks on a namespace (or a specific resource, if given)
2257  * that have 0 readers/writers.
2258  *
2259  * If flags & LCF_LOCAL, throw the locks away without trying
2260  * to notify the server.
2261  */
2262 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
2263                            const struct ldlm_res_id *res_id,
2264                            enum ldlm_cancel_flags flags, void *opaque)
2265 {
2266         struct ldlm_cli_cancel_arg arg = {
2267                 .lc_flags       = flags,
2268                 .lc_opaque      = opaque,
2269         };
2270
2271         ENTRY;
2272
2273         if (ns == NULL)
2274                 RETURN(ELDLM_OK);
2275
2276         if (res_id != NULL) {
2277                 RETURN(ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
2278                                                        LCK_MINMODE, flags,
2279                                                        opaque));
2280         } else {
2281                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
2282                                          ldlm_cli_hash_cancel_unused, &arg, 0);
2283                 RETURN(ELDLM_OK);
2284         }
2285 }
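
/*
 * Illustrative usage sketch: at unmount time a client can throw away
 * every unused lock in a namespace without notifying the server:
 *
 *	ldlm_cli_cancel_unused(ns, NULL, LCF_LOCAL, NULL);
 */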
2286
2287 /* Lock iterators. */
2288
2289 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
2290                           void *closure)
2291 {
2292         struct list_head *tmp, *next;
2293         struct ldlm_lock *lock;
2294         int rc = LDLM_ITER_CONTINUE;
2295
2296         ENTRY;
2297
2298         if (!res)
2299                 RETURN(LDLM_ITER_CONTINUE);
2300
2301         lock_res(res);
2302         list_for_each_safe(tmp, next, &res->lr_granted) {
2303                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
2304
2305                 if (iter(lock, closure) == LDLM_ITER_STOP)
2306                         GOTO(out, rc = LDLM_ITER_STOP);
2307         }
2308
2309         list_for_each_safe(tmp, next, &res->lr_waiting) {
2310                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
2311
2312                 if (iter(lock, closure) == LDLM_ITER_STOP)
2313                         GOTO(out, rc = LDLM_ITER_STOP);
2314         }
2315 out:
2316         unlock_res(res);
2317         RETURN(rc);
2318 }
2319
2320 struct iter_helper_data {
2321         ldlm_iterator_t iter;
2322         void *closure;
2323 };
2324
2325 static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
2326 {
2327         struct iter_helper_data *helper = closure;
2328
2329         return helper->iter(lock, helper->closure);
2330 }
2331
2332 static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2333                                 struct hlist_node *hnode, void *arg)
2334
2336         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
2337
2338         return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
2339                                      LDLM_ITER_STOP;
2340 }
2341
2342 void ldlm_namespace_foreach(struct ldlm_namespace *ns,
2343                             ldlm_iterator_t iter, void *closure)
2345 {
2346         struct iter_helper_data helper = { .iter = iter, .closure = closure };
2347
2348         cfs_hash_for_each_nolock(ns->ns_rs_hash,
2349                                  ldlm_res_iter_helper, &helper, 0);
2351 }
2352
2353 /*
2354  * non-blocking function to manipulate a lock whose cb_data is being put away.
2355  * return  0:  no resource found
2356  *       > 0:  must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE.
2357  *       < 0:  errors
2358  */
2359 int ldlm_resource_iterate(struct ldlm_namespace *ns,
2360                           const struct ldlm_res_id *res_id,
2361                           ldlm_iterator_t iter, void *data)
2362 {
2363         struct ldlm_resource *res;
2364         int rc;
2365
2366         ENTRY;
2367
2368         LASSERTF(ns != NULL, "must pass in namespace\n");
2369
2370         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
2371         if (IS_ERR(res))
2372                 RETURN(0);
2373
2374         LDLM_RESOURCE_ADDREF(res);
2375         rc = ldlm_resource_foreach(res, iter, data);
2376         LDLM_RESOURCE_DELREF(res);
2377         ldlm_resource_putref(res);
2378         RETURN(rc);
2379 }
2380 EXPORT_SYMBOL(ldlm_resource_iterate);
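
/*
 * Illustrative sketch of an iterator callback (the function and
 * variable names are hypothetical): count the locks on one resource.
 *
 *	static int example_count_cb(struct ldlm_lock *lock, void *closure)
 *	{
 *		int *count = closure;
 *
 *		(*count)++;
 *		return LDLM_ITER_CONTINUE;
 *	}
 *
 *	int count = 0;
 *
 *	ldlm_resource_iterate(ns, &res_id, example_count_cb, &count);
 */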
2381
2382 /* Lock replay */
2383 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
2384 {
2385         struct list_head *list = closure;
2386
2387         /* we use l_pending_chain here, because it's unused on clients. */
2388         LASSERTF(list_empty(&lock->l_pending_chain),
2389                  "lock %p next %p prev %p\n",
2390                  lock, &lock->l_pending_chain.next,
2391                  &lock->l_pending_chain.prev);
2392         /*
2393          * b=9573: don't replay locks left after eviction, or
2394          * b=17614: locks being actively cancelled. Get a reference
2395          * on a lock so that it does not disappear under us (e.g. due to cancel)
2396          */
2397         if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_BL_DONE))) {
2398                 list_add(&lock->l_pending_chain, list);
2399                 LDLM_LOCK_GET(lock);
2400         }
2401
2402         return LDLM_ITER_CONTINUE;
2403 }
2404
2405 static int replay_lock_interpret(const struct lu_env *env,
2406                                  struct ptlrpc_request *req, void *args, int rc)
2407 {
2408         struct ldlm_async_args *aa = args;
2409         struct ldlm_lock     *lock;
2410         struct ldlm_reply    *reply;
2411         struct obd_export    *exp;
2412
2413         ENTRY;
2414         atomic_dec(&req->rq_import->imp_replay_inflight);
2415         wake_up(&req->rq_import->imp_replay_waitq);
2416
2417         if (rc != ELDLM_OK)
2418                 GOTO(out, rc);
2419
2420         reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2421         if (reply == NULL)
2422                 GOTO(out, rc = -EPROTO);
2423
2424         lock = ldlm_handle2lock(&aa->lock_handle);
2425         if (!lock) {
2426                 CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
2427                        aa->lock_handle.cookie, reply->lock_handle.cookie,
2428                        req->rq_export->exp_client_uuid.uuid,
2429                        libcfs_id2str(req->rq_peer));
2430                 GOTO(out, rc = -ESTALE);
2431         }
2432
2433         /* Key change rehash lock in per-export hash with new key */
2434         exp = req->rq_export;
2435         if (exp && exp->exp_lock_hash) {
2436                 /*
2437                  * In the function below, .hs_keycmp resolves to
2438                  * ldlm_export_lock_keycmp()
2439                  */
2440                 /* coverity[overrun-buffer-val] */
2441                 cfs_hash_rehash_key(exp->exp_lock_hash,
2442                                     &lock->l_remote_handle,
2443                                     &reply->lock_handle,
2444                                     &lock->l_exp_hash);
2445         } else {
2446                 lock->l_remote_handle = reply->lock_handle;
2447         }
2448
2449         LDLM_DEBUG(lock, "replayed lock:");
2450         ptlrpc_import_recovery_state_machine(req->rq_import);
2451         LDLM_LOCK_PUT(lock);
2452 out:
2453         if (rc != ELDLM_OK)
2454                 ptlrpc_connect_import(req->rq_import);
2455
2456         RETURN(rc);
2457 }
2458
2459 static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
2460 {
2461         struct ptlrpc_request *req;
2462         struct ldlm_async_args *aa;
2463         struct ldlm_request   *body;
2464         int flags;
2465
2466         ENTRY;
2467
2469         /* b=11974: Do not replay a lock which is actively being canceled */
2470         if (ldlm_is_bl_done(lock)) {
2471                 LDLM_DEBUG(lock, "Not replaying canceled lock:");
2472                 RETURN(0);
2473         }
2474
2475         /*
2476          * If this is a reply-less callback lock, we cannot replay it: the
2477          * server might have dropped it long ago, with the notification of that
2478          * event lost in the network (and a conflicting lock already granted).
2479          */
2480         if (ldlm_is_cancel_on_block(lock)) {
2481                 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
2482                 ldlm_lock_cancel(lock);
2483                 RETURN(0);
2484         }
2485
2486         /*
2487          * If granted mode matches the requested mode, this lock is granted.
2488          *
2489          * If we haven't been granted anything and are on a resource list,
2490          * then we're blocked/waiting.
2491          *
2492          * If we haven't been granted anything and we're NOT on a resource list,
2493          * then we haven't got a reply yet and don't have a known disposition.
2494          * This happens whenever a lock enqueue is the request that triggers
2495          * recovery.
2496          */
2497         if (ldlm_is_granted(lock))
2498                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
2499         else if (!list_empty(&lock->l_res_link))
2500                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
2501         else
2502                 flags = LDLM_FL_REPLAY;
2503
2504         req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
2505                                         LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
2506         if (req == NULL)
2507                 RETURN(-ENOMEM);
2508
2509         /* We're part of recovery, so don't wait for it. */
2510         req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
2511
2512         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2513         ldlm_lock2desc(lock, &body->lock_desc);
2514         body->lock_flags = ldlm_flags_to_wire(flags);
2515
2516         ldlm_lock2handle(lock, &body->lock_handle[0]);
2517         if (lock->l_lvb_len > 0)
2518                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
2519         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2520                              lock->l_lvb_len);
2521         ptlrpc_request_set_replen(req);
2522         /*
2523          * Notify the server we've replayed all requests.
2524          * Also, mark the request to be put on a dedicated
2525          * queue to be processed after all request replays.
2526          * b=6063
2527          */
2528         lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
2529
2530         LDLM_DEBUG(lock, "replaying lock:");
2531
2532         atomic_inc(&imp->imp_replay_inflight);
2533         aa = ptlrpc_req_async_args(aa, req);
2534         aa->lock_handle = body->lock_handle[0];
2535         req->rq_interpret_reply = replay_lock_interpret;
2536         ptlrpcd_add_req(req);
2537
2538         RETURN(0);
2539 }
2540
2541 /**
2542  * Cancel as many unused locks as possible before replay. Since we are
2543  * in recovery, we cannot wait for any outstanding RPCs or send new RPCs
2544  * to the server.
2545  *
2546  * Called only in recovery, before replaying locks. There is no need to
2547  * replay locks that are unused. Since the clients may hold thousands of
2548  * cached unused locks, dropping the unused locks can greatly reduce the
2549  * load on the servers at recovery time.
2550  */
2551 static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
2552 {
2553         int canceled;
2554         LIST_HEAD(cancels);
2555
2556         CDEBUG(D_DLMTRACE,
2557                "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
2558                ldlm_ns_name(ns), ns->ns_nr_unused);
2559
2560         /*
2561          * We don't need to care whether or not LRU resize is enabled
2562          * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
2563          * count parameter
2564          */
2565         canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
2566                                          LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
2567
2568         CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
2569                            canceled, ldlm_ns_name(ns));
2570 }
2571
2572 static int lock_can_replay(struct obd_import *imp)
2573 {
2574         struct client_obd *cli = &imp->imp_obd->u.cli;
2575
2576         CDEBUG(D_HA, "check lock replay limit, inflights = %u(%u)\n",
2577                atomic_read(&imp->imp_replay_inflight) - 1,
2578                cli->cl_max_rpcs_in_flight);
2579
2580         /* +1 due to the ldlm_replay_locks() increment */
2581         return atomic_read(&imp->imp_replay_inflight) <
2582                1 + min_t(u32, cli->cl_max_rpcs_in_flight, 8);
2583 }
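
/*
 * Worked example with illustrative numbers: with cl_max_rpcs_in_flight
 * of 32 the cap is min(32, 8) = 8 concurrent replay RPCs, so replay may
 * proceed while imp_replay_inflight < 1 + 8 = 9; the extra 1 accounts
 * for the reference taken in ldlm_replay_locks().
 */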
2584
2585 int __ldlm_replay_locks(struct obd_import *imp, bool rate_limit)
2586 {
2587         struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
2588         LIST_HEAD(list);
2589         struct ldlm_lock *lock, *next;
2590         int rc = 0;
2591
2592         ENTRY;
2593
2594         LASSERT(atomic_read(&imp->imp_replay_inflight) == 1);
2595
2596         /* don't replay locks if import failed recovery */
2597         if (imp->imp_vbr_failed)
2598                 RETURN(0);
2599
2600         if (ldlm_cancel_unused_locks_before_replay)
2601                 ldlm_cancel_unused_locks_for_replay(ns);
2602
2603         ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
2604
2605         list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
2606                 list_del_init(&lock->l_pending_chain);
2607                 if (rc) {
2608                         LDLM_LOCK_RELEASE(lock);
2609                         continue; /* or try to do the rest? */
2610                 }
2611                 rc = replay_one_lock(imp, lock);
2612                 LDLM_LOCK_RELEASE(lock);
2613
2614                 if (rate_limit)
2615                         wait_event_idle_exclusive(imp->imp_replay_waitq,
2616                                                   lock_can_replay(imp));
2617         }
2618
2619         RETURN(rc);
2620 }
2621
2622 /**
2623  * Lock replay uses rate control and can sleep while waiting, so it
2624  * must run in a thread separate from ptlrpcd itself.
2625  */
2626 static int ldlm_lock_replay_thread(void *data)
2627 {
2628         struct obd_import *imp = data;
2629
2630         CDEBUG(D_HA, "lock replay thread %s to %s@%s\n",
2631                imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
2632                imp->imp_connection->c_remote_uuid.uuid);
2633
2634         __ldlm_replay_locks(imp, true);
2635         atomic_dec(&imp->imp_replay_inflight);
2636         ptlrpc_import_recovery_state_machine(imp);
2637         class_import_put(imp);
2638
2639         return 0;
2640 }
2641
2642 int ldlm_replay_locks(struct obd_import *imp)
2643 {
2644         struct task_struct *task;
2645         int rc = 0;
2646
2647         class_import_get(imp);
2648         /* ensure this doesn't fall to 0 before all have been queued */
2649         atomic_inc(&imp->imp_replay_inflight);
2650
2651         task = kthread_run(ldlm_lock_replay_thread, imp, "ldlm_lock_replay");
2652         if (IS_ERR(task)) {
2653                 rc = PTR_ERR(task);
2654                 CDEBUG(D_HA, "can't start lock replay thread: rc = %d\n", rc);
2655
2656                 /* run lock replay without rate control */
2657                 rc = __ldlm_replay_locks(imp, false);
2658                 atomic_dec(&imp->imp_replay_inflight);
2659                 class_import_put(imp);
2660         }
2661
2662         return rc;
2663 }