LU-4423 ptlrpc: incorporate BUILD_BUG_ON into ptlrpc_req_async_args()
[fs/lustre-release.git] / lustre / ldlm / ldlm_lockd.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_lockd.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <libcfs/libcfs.h>
43 #include <lustre_errno.h>
44 #include <lustre_dlm.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 static int ldlm_num_threads;
49 module_param(ldlm_num_threads, int, 0444);
50 MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
51
52 static unsigned int ldlm_cpu_bind = 1;
53 module_param(ldlm_cpu_bind, uint, 0444);
54 MODULE_PARM_DESC(ldlm_cpu_bind,
55                  "bind DLM service threads to particular CPU partitions");
56
57 static char *ldlm_cpts;
58 module_param(ldlm_cpts, charp, 0444);
59 MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
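/*
 * All three parameters above are read-only at runtime (mode 0444), so they
 * must be supplied at module load time.  A hedged usage sketch, assuming the
 * LDLM code is linked into the ptlrpc module as in stock Lustre builds:
 *
 *   options ptlrpc ldlm_num_threads=64 ldlm_cpu_bind=0 ldlm_cpts="[0-3]"
 *
 * placed in /etc/modprobe.d/lustre.conf on the server.
 */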
60
61 static DEFINE_MUTEX(ldlm_ref_mutex);
62 static int ldlm_refcount;
63
64 struct kobject *ldlm_kobj;
65 struct kset *ldlm_ns_kset;
66 struct kset *ldlm_svc_kset;
67
68 /* LDLM state */
69
70 static struct ldlm_state *ldlm_state;
71
72 /*
 73  * Timeout for the initial callback (AST) reply (bz10399).
 74  * Because a 32-bit time value has to be sent over the
 75  * wire, return it as time_t instead of time64_t.
76  */
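/*
 * Worked example (assuming the common defaults obd_timeout = 100 and
 * ldlm_timeout = 20): min(20, 100 / 3) = 20 seconds; the result is only
 * clamped up to 1 second when both tunables are configured very low.
 */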
77 static inline time_t ldlm_get_rq_timeout(void)
78 {
79         /* Non-AT value */
80         time_t timeout = min(ldlm_timeout, obd_timeout / 3);
81
82         return timeout < 1 ? 1 : timeout;
83 }
84
85 struct ldlm_bl_pool {
86         spinlock_t blp_lock;
87
88         /*
89          * blp_prio_list is used for callbacks that should be handled
90          * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
91          * see b=13843
92          */
93         struct list_head blp_prio_list;
94
95         /*
96          * blp_list is used for all other callbacks which are likely
97          * to take longer to process.
98          */
99         struct list_head blp_list;
100
101         wait_queue_head_t blp_waitq;
102         struct completion blp_comp;
103         atomic_t blp_num_threads;
104         atomic_t blp_busy_threads;
105         int blp_min_threads;
106         int blp_max_threads;
107 };
108
109 struct ldlm_bl_work_item {
110         struct list_head        blwi_entry;
111         struct ldlm_namespace   *blwi_ns;
112         struct ldlm_lock_desc   blwi_ld;
113         struct ldlm_lock        *blwi_lock;
114         struct list_head        blwi_head;
115         int                     blwi_count;
116         struct completion       blwi_comp;
117         enum ldlm_cancel_flags  blwi_flags;
118         int                     blwi_mem_pressure;
119 };
120
121 #ifdef HAVE_SERVER_SUPPORT
122
123 /**
124  * Protects both waiting_locks_list and expired_lock_thread.
125  */
126 static DEFINE_SPINLOCK(waiting_locks_spinlock); /* BH lock (timer) */
127
128 /**
129  * List for contended locks.
130  *
131  * As soon as a lock is contended, it gets placed on this list and
132  * the expected time to get a response is filled into the lock. A special
133  * thread walks the list looking for locks that should be released and
134  * schedules client evictions for those that have not been released in
135  * time.
136  *
137  * All access to it should be under waiting_locks_spinlock.
138  */
139 static LIST_HEAD(waiting_locks_list);
140 static void waiting_locks_callback(TIMER_DATA_TYPE unused);
141 static CFS_DEFINE_TIMER(waiting_locks_timer, waiting_locks_callback, 0, 0);
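/*
 * Flow of the machinery below: ldlm_add_waiting_lock() places a lock on
 * waiting_locks_list (taking a reference) once a blocking/completion AST has
 * been sent; waiting_locks_callback() runs from the timer and moves locks
 * whose l_callback_timeout has passed onto expired_lock_list; the
 * expired_lock_main() thread then either prolongs still-busy locks or evicts
 * the client via class_fail_export().  ldlm_del_waiting_lock() removes a lock
 * whose cancel arrived in time.
 */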
142
143 enum elt_state {
144         ELT_STOPPED,
145         ELT_READY,
146         ELT_TERMINATE,
147 };
148
149 static DECLARE_WAIT_QUEUE_HEAD(expired_lock_wait_queue);
150 static enum elt_state expired_lock_thread_state = ELT_STOPPED;
151 static int expired_lock_dump;
152 static LIST_HEAD(expired_lock_list);
153
154 static int ldlm_lock_busy(struct ldlm_lock *lock);
155 static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout);
156 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds);
157
158 static inline int have_expired_locks(void)
159 {
160         int need_to_run;
161
162         ENTRY;
163         spin_lock_bh(&waiting_locks_spinlock);
164         need_to_run = !list_empty(&expired_lock_list);
165         spin_unlock_bh(&waiting_locks_spinlock);
166
167         RETURN(need_to_run);
168 }
169
170 /**
171  * Check expired lock list for expired locks and time them out.
172  */
173 static int expired_lock_main(void *arg)
174 {
175         struct list_head *expired = &expired_lock_list;
176         struct l_wait_info lwi = { 0 };
177         int do_dump;
178
179         ENTRY;
180
181         expired_lock_thread_state = ELT_READY;
182         wake_up(&expired_lock_wait_queue);
183
184         while (1) {
185                 l_wait_event(expired_lock_wait_queue,
186                              have_expired_locks() ||
187                              expired_lock_thread_state == ELT_TERMINATE,
188                              &lwi);
189
190                 spin_lock_bh(&waiting_locks_spinlock);
191                 if (expired_lock_dump) {
192                         spin_unlock_bh(&waiting_locks_spinlock);
193
194                         /* from waiting_locks_callback, but not in timer */
195                         libcfs_debug_dumplog();
196
197                         spin_lock_bh(&waiting_locks_spinlock);
198                         expired_lock_dump = 0;
199                 }
200
201                 do_dump = 0;
202
203                 while (!list_empty(expired)) {
204                         struct obd_export *export;
205                         struct ldlm_lock *lock;
206
207                         lock = list_entry(expired->next, struct ldlm_lock,
208                                           l_pending_chain);
209                         if ((void *)lock < LP_POISON + PAGE_SIZE &&
210                             (void *)lock >= LP_POISON) {
211                                 spin_unlock_bh(&waiting_locks_spinlock);
212                                 CERROR("free lock on elt list %p\n", lock);
213                                 LBUG();
214                         }
215                         list_del_init(&lock->l_pending_chain);
216                         if ((void *)lock->l_export <
217                              LP_POISON + PAGE_SIZE &&
218                             (void *)lock->l_export >= LP_POISON) {
219                                 CERROR("lock with free export on elt list %p\n",
220                                        lock->l_export);
221                                 lock->l_export = NULL;
222                                 LDLM_ERROR(lock, "free export");
223                                 /*
224                                  * release extra ref grabbed by
225                                  * ldlm_add_waiting_lock() or
226                                  * ldlm_failed_ast()
227                                  */
228                                 LDLM_LOCK_RELEASE(lock);
229                                 continue;
230                         }
231
232                         if (ldlm_is_destroyed(lock)) {
233                                 /*
234                          * release the waiting-list refcount on the lock
235                          * that waiting_locks_callback() moved here
236                                  */
237                                 LDLM_LOCK_RELEASE(lock);
238                                 continue;
239                         }
240                         export = class_export_lock_get(lock->l_export, lock);
241                         spin_unlock_bh(&waiting_locks_spinlock);
242
243                         /* Check if we need to prolong timeout */
244                         if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
245                             lock->l_callback_timeout != 0 && /* not AST error */
246                             ldlm_lock_busy(lock)) {
247                                 LDLM_DEBUG(lock, "prolong the busy lock");
248                                 lock_res_and_lock(lock);
249                                 ldlm_add_waiting_lock(lock,
250                                                 ldlm_bl_timeout(lock) >> 1);
251                                 unlock_res_and_lock(lock);
252                         } else {
253                                 spin_lock_bh(&export->exp_bl_list_lock);
254                                 list_del_init(&lock->l_exp_list);
255                                 spin_unlock_bh(&export->exp_bl_list_lock);
256
257                                 LDLM_ERROR(lock,
258                                            "lock callback timer expired after %llds: evicting client at %s ",
259                                            ktime_get_real_seconds() -
260                                            lock->l_blast_sent,
261                                            obd_export_nid2str(export));
262                                 ldlm_lock_to_ns(lock)->ns_timeouts++;
263                                 do_dump++;
264                                 class_fail_export(export);
265                         }
266                         class_export_lock_put(export, lock);
267                         /*
268                          * release extra ref grabbed by ldlm_add_waiting_lock()
269                          * or ldlm_failed_ast()
270                          */
271                         LDLM_LOCK_RELEASE(lock);
272
273                         spin_lock_bh(&waiting_locks_spinlock);
274                 }
275                 spin_unlock_bh(&waiting_locks_spinlock);
276
277                 if (do_dump && obd_dump_on_eviction) {
278                         CERROR("dump the log upon eviction\n");
279                         libcfs_debug_dumplog();
280                 }
281
282                 if (expired_lock_thread_state == ELT_TERMINATE)
283                         break;
284         }
285
286         expired_lock_thread_state = ELT_STOPPED;
287         wake_up(&expired_lock_wait_queue);
288         RETURN(0);
289 }
290
291 /**
292  * Check if there is a request in the export request list
293  * which prevents the lock canceling.
294  */
295 static int ldlm_lock_busy(struct ldlm_lock *lock)
296 {
297         struct ptlrpc_request *req;
298         int match = 0;
299
300         ENTRY;
301
302         if (lock->l_export == NULL)
303                 return 0;
304
305         spin_lock(&lock->l_export->exp_rpc_lock);
306         list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
307                                 rq_exp_list) {
308                 if (req->rq_ops->hpreq_lock_match) {
309                         match = req->rq_ops->hpreq_lock_match(req, lock);
310                         if (match)
311                                 break;
312                 }
313         }
314         spin_unlock(&lock->l_export->exp_rpc_lock);
315         RETURN(match);
316 }
317
318 /* This is called from within a timer interrupt and cannot schedule */
319 static void waiting_locks_callback(TIMER_DATA_TYPE unused)
320 {
321         struct ldlm_lock *lock;
322         int need_dump = 0;
323
324         spin_lock_bh(&waiting_locks_spinlock);
325         while (!list_empty(&waiting_locks_list)) {
326                 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
327                                   l_pending_chain);
328                 if (lock->l_callback_timeout > ktime_get_seconds() ||
329                     lock->l_req_mode == LCK_GROUP)
330                         break;
331
332                 /*
333                  * no need to take an extra ref on the lock since it was in
334                  * the waiting_locks_list and ldlm_add_waiting_lock()
335                  * already grabbed a ref
336                  */
337                 list_del(&lock->l_pending_chain);
338                 list_add(&lock->l_pending_chain, &expired_lock_list);
339                 need_dump = 1;
340         }
341
342         if (!list_empty(&expired_lock_list)) {
343                 if (obd_dump_on_timeout && need_dump)
344                         expired_lock_dump = __LINE__;
345
346                 wake_up(&expired_lock_wait_queue);
347         }
348
349         /*
350          * Make sure the timer will fire again if we have any locks
351          * left.
352          */
353         if (!list_empty(&waiting_locks_list)) {
354                 unsigned long timeout_jiffies;
355
356                 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
357                                   l_pending_chain);
358                 timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout);
359                 mod_timer(&waiting_locks_timer, timeout_jiffies);
360         }
361         spin_unlock_bh(&waiting_locks_spinlock);
362 }
363
364 /**
365  * Add lock to the list of contended locks.
366  *
367  * Indicate that we're waiting for a client to call us back cancelling a given
368  * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
369  * timer to fire appropriately.  (We round up to the next second, to avoid
370  * floods of timer firings during periods of high lock contention and traffic).
371  * As done by ldlm_add_waiting_lock(), the caller must grab a lock reference
372  * if it has been added to the waiting list (1 is returned).
373  *
374  * Called with the namespace lock held.
375  */
376 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds)
377 {
378         unsigned long timeout_jiffies;
379         time64_t timeout;
380
381         if (!list_empty(&lock->l_pending_chain))
382                 return 0;
383
384         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
385             OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
386                 seconds = 1;
387
388         timeout = ktime_get_seconds() + seconds;
389         if (likely(timeout > lock->l_callback_timeout))
390                 lock->l_callback_timeout = timeout;
391
392         timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout);
393
394         if (time_before(timeout_jiffies, waiting_locks_timer.expires) ||
395             !timer_pending(&waiting_locks_timer))
396                 mod_timer(&waiting_locks_timer, timeout_jiffies);
397
398         /*
399          * if the new lock has a shorter timeout than something earlier on
400          * the list, we'll wait the longer amount of time; no big deal.
401          */
402         /* FIFO */
403         list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
404         return 1;
405 }
406
407 static void ldlm_add_blocked_lock(struct ldlm_lock *lock)
408 {
409         spin_lock_bh(&lock->l_export->exp_bl_list_lock);
410         if (list_empty(&lock->l_exp_list)) {
411                 if (!ldlm_is_granted(lock))
412                         list_add_tail(&lock->l_exp_list,
413                                       &lock->l_export->exp_bl_list);
414                 else
415                         list_add(&lock->l_exp_list,
416                                  &lock->l_export->exp_bl_list);
417         }
418         spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
419
420         /*
421          * A blocked lock is added. Adjust the position in
422          * the stale list if the export is in the list.
423          * If the export is stale and not in the list, it is being
424          * processed and will be placed at the right position
425          * by obd_stale_export_put().
426          */
427         if (!list_empty(&lock->l_export->exp_stale_list))
428                 obd_stale_export_adjust(lock->l_export);
429 }
430
431 static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
432 {
433         int ret;
434
435         /* NB: must be called with lock_res_and_lock() held */
436         LASSERT(ldlm_is_res_locked(lock));
437         LASSERT(!ldlm_is_cancel_on_block(lock));
438
439         /*
440          * Do not put a cross-MDT lock in the waiting list, since we
441          * will not evict it due to timeout for now
442          */
443         if (lock->l_export != NULL &&
444             (exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS))
445                 return 0;
446
447         spin_lock_bh(&waiting_locks_spinlock);
448         if (ldlm_is_cancel(lock)) {
449                 spin_unlock_bh(&waiting_locks_spinlock);
450                 return 0;
451         }
452
453         if (ldlm_is_destroyed(lock)) {
454                 static time64_t next;
455
456                 spin_unlock_bh(&waiting_locks_spinlock);
457                 LDLM_ERROR(lock, "not waiting on destroyed lock (b=5653)");
458                 if (ktime_get_seconds() > next) {
459                         next = ktime_get_seconds() + 14400;
460                         libcfs_debug_dumpstack(NULL);
461                 }
462                 return 0;
463         }
464
465         ldlm_set_waited(lock);
466         lock->l_blast_sent = ktime_get_real_seconds();
467         ret = __ldlm_add_waiting_lock(lock, timeout);
468         if (ret) {
469                 /*
470                  * grab ref on the lock if it has been added to the
471                  * waiting list
472                  */
473                 LDLM_LOCK_GET(lock);
474         }
475         spin_unlock_bh(&waiting_locks_spinlock);
476
477         if (ret)
478                 ldlm_add_blocked_lock(lock);
479
480         LDLM_DEBUG(lock, "%sadding to wait list(timeout: %lld, AT: %s)",
481                    ret == 0 ? "not re-" : "", timeout,
482                    AT_OFF ? "off" : "on");
483         return ret;
484 }
485
486 /**
487  * Remove a lock from the pending list, likely because it had its cancellation
488  * callback arrive without incident.  This adjusts the lock-timeout timer if
489  * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
490  * As done by ldlm_del_waiting_lock(), the caller must release the lock
491  * reference when the lock is removed from any list (1 is returned).
492  *
493  * Called with namespace lock held.
494  */
495 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
496 {
497         struct list_head *list_next;
498
499         if (list_empty(&lock->l_pending_chain))
500                 return 0;
501
502         list_next = lock->l_pending_chain.next;
503         if (lock->l_pending_chain.prev == &waiting_locks_list) {
504                 /* Removing the head of the list, adjust timer. */
505                 if (list_next == &waiting_locks_list) {
506                         /* No more, just cancel. */
507                         del_timer(&waiting_locks_timer);
508                 } else {
509                         struct ldlm_lock *next;
510
511                         next = list_entry(list_next, struct ldlm_lock,
512                                           l_pending_chain);
513                         mod_timer(&waiting_locks_timer,
514                                   cfs_time_seconds(next->l_callback_timeout));
515                 }
516         }
517         list_del_init(&lock->l_pending_chain);
518
519         return 1;
520 }
521
522 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
523 {
524         int ret;
525
526         if (lock->l_export == NULL) {
527                 /* We don't have a "waiting locks list" on clients. */
528                 CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
529                 return 0;
530         }
531
532         spin_lock_bh(&waiting_locks_spinlock);
533         ret = __ldlm_del_waiting_lock(lock);
534         ldlm_clear_waited(lock);
535         spin_unlock_bh(&waiting_locks_spinlock);
536
537         /* remove the lock from the export blocking list */
538         spin_lock_bh(&lock->l_export->exp_bl_list_lock);
539         list_del_init(&lock->l_exp_list);
540         spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
541
542         if (ret) {
543                 /*
544                  * release lock ref if it has indeed been removed
545                  * from a list
546                  */
547                 LDLM_LOCK_RELEASE(lock);
548         }
549
550         LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
551         return ret;
552 }
553
554 /**
555  * Prolong the contended lock waiting time.
556  *
557  * Called with namespace lock held.
558  */
559 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
560 {
561         if (lock->l_export == NULL) {
562                 /* We don't have a "waiting locks list" on clients. */
563                 LDLM_DEBUG(lock, "client lock: no-op");
564                 return 0;
565         }
566
567         if (exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) {
568                 /* We don't have a "waiting locks list" on OSP. */
569                 LDLM_DEBUG(lock, "MDS-MDS lock: no-op");
570                 return 0;
571         }
572
573         spin_lock_bh(&waiting_locks_spinlock);
574
575         if (list_empty(&lock->l_pending_chain)) {
576                 spin_unlock_bh(&waiting_locks_spinlock);
577                 LDLM_DEBUG(lock, "wasn't waiting");
578                 return 0;
579         }
580
581         /*
582          * we remove/add the lock to the waiting list, so no need to
583          * release/take a lock reference
584          */
585         __ldlm_del_waiting_lock(lock);
586         __ldlm_add_waiting_lock(lock, timeout);
587         spin_unlock_bh(&waiting_locks_spinlock);
588
589         LDLM_DEBUG(lock, "refreshed");
590         return 1;
591 }
592 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
593
594 #else /* HAVE_SERVER_SUPPORT */
595
596 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
597 {
598         RETURN(0);
599 }
600
601 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
602 {
603         RETURN(0);
604 }
605
606 #endif /* !HAVE_SERVER_SUPPORT */
607
608 #ifdef HAVE_SERVER_SUPPORT
609
610 /**
611  * Calculate the per-export Blocking timeout (covering BL AST, data flush,
612  * lock cancel, and their replies). Used for lock callback timeout and AST
613  * re-send period.
614  *
615  * \param[in] lock        lock which is getting the blocking callback
616  *
617  * \retval            timeout in seconds to wait for the client reply
618  */
619 time64_t ldlm_bl_timeout(struct ldlm_lock *lock)
620 {
621         time64_t timeout;
622
623         if (AT_OFF)
624                 return obd_timeout / 2;
625
626         /*
627          * Since these are non-updating timeouts, we should be conservative.
628          * Take 150% of the usual value (timeout + timeout/2).
629          * It would be nice to have some kind of "early reply" mechanism for
630          * lock callbacks too...
631          */
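        /*
         * Worked example: if at_get() returns an estimate of 10 seconds, the
         * blocking timeout becomes 10 + 10/2 = 15 seconds, unless
         * ldlm_enqueue_min is larger, in which case that floor is used.
         */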
632         timeout = at_get(&lock->l_export->exp_bl_lock_at);
633         return max(timeout + (timeout >> 1), (time64_t)ldlm_enqueue_min);
634 }
635 EXPORT_SYMBOL(ldlm_bl_timeout);
636
637 /**
638  * Perform lock cleanup if AST sending failed.
639  */
640 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
641                             const char *ast_type)
642 {
643         LCONSOLE_ERROR_MSG(0x138,
644                            "%s: A client on nid %s was evicted due to a lock %s callback time out: rc %d\n",
645                            lock->l_export->exp_obd->obd_name,
646                            obd_export_nid2str(lock->l_export), ast_type, rc);
647
648         if (obd_dump_on_timeout)
649                 libcfs_debug_dumplog();
650         spin_lock_bh(&waiting_locks_spinlock);
651         if (__ldlm_del_waiting_lock(lock) == 0)
652                 /*
653                  * the lock was not in any list, grab an extra ref before adding
654                  * the lock to the expired list
655                  */
656                 LDLM_LOCK_GET(lock);
657         lock->l_callback_timeout = 0; /* differentiate it from expired locks */
658         list_add(&lock->l_pending_chain, &expired_lock_list);
659         wake_up(&expired_lock_wait_queue);
660         spin_unlock_bh(&waiting_locks_spinlock);
661 }
662
663 /**
664  * Perform lock cleanup if AST reply came with error.
665  */
666 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
667                                  struct ptlrpc_request *req, int rc,
668                                  const char *ast_type)
669 {
670         struct lnet_process_id peer = req->rq_import->imp_connection->c_peer;
671
672         if (!req->rq_replied || (rc && rc != -EINVAL)) {
673                 if (ldlm_is_cancel(lock)) {
674                         LDLM_DEBUG(lock,
675                                    "%s AST (req@%p x%llu) timeout from nid %s, but cancel was received (AST reply lost?)",
676                                    ast_type, req, req->rq_xid,
677                                    libcfs_nid2str(peer.nid));
678                         ldlm_lock_cancel(lock);
679                         rc = -ERESTART;
680                 } else if (rc == -ENODEV || rc == -ESHUTDOWN ||
681                            (rc == -EIO &&
682                             req->rq_import->imp_state == LUSTRE_IMP_CLOSED)) {
683                         /*
684                          * During umount the AST fails because it cannot be
685                          * sent. This shouldn't lead to client eviction.
686                          * -ENODEV is returned by ptl_send_rpc() for a new
687                          *  request on such an import.
688                          * -ESHUTDOWN is returned by ptlrpc_import_delay_req()
689                          *  if imp_invalid or obd_no_recov is set.
690                          * ptlrpc_import_delay_req() also checks for
691                          *  LUSTRE_IMP_CLOSED and returns -EIO in that case.
692                          * In all such cases the errors are ignored.
693                          */
694                         LDLM_DEBUG(lock,
695                                    "%s AST can't be sent due to a server %s failure or umount process: rc = %d\n",
696                                     ast_type,
697                                      req->rq_import->imp_obd->obd_name, rc);
698                 } else {
699                         LDLM_ERROR(lock,
700                                    "client (nid %s) %s %s AST (req@%p x%llu status %d rc %d), evict it",
701                                    libcfs_nid2str(peer.nid),
702                                    req->rq_replied ? "returned error from" :
703                                    "failed to reply to",
704                                    ast_type, req, req->rq_xid,
705                                    (req->rq_repmsg != NULL) ?
706                                    lustre_msg_get_status(req->rq_repmsg) : 0,
707                                    rc);
708                         ldlm_failed_ast(lock, rc, ast_type);
709                 }
710                 return rc;
711         }
712
713         if (rc == -EINVAL) {
714                 struct ldlm_resource *res = lock->l_resource;
715
716                 LDLM_DEBUG(lock,
717                            "client (nid %s) returned %d from %s AST (req@%p x%llu) - normal race",
718                            libcfs_nid2str(peer.nid),
719                            req->rq_repmsg ?
720                            lustre_msg_get_status(req->rq_repmsg) : -1,
721                            ast_type, req, req->rq_xid);
722                 if (res) {
723                         /*
724                          * update lvbo to return proper attributes.
725                          * see b=23174
726                          */
727                         ldlm_resource_getref(res);
728                         ldlm_lvbo_update(res, lock, NULL, 1);
729                         ldlm_resource_putref(res);
730                 }
731                 ldlm_lock_cancel(lock);
732                 rc = -ERESTART;
733         }
734
735         return rc;
736 }
737
738 static int ldlm_cb_interpret(const struct lu_env *env,
739                              struct ptlrpc_request *req, void *args, int rc)
740 {
741         struct ldlm_cb_async_args *ca = args;
742         struct ldlm_lock *lock = ca->ca_lock;
743         struct ldlm_cb_set_arg *arg  = ca->ca_set_arg;
744
745         ENTRY;
746
747         LASSERT(lock != NULL);
748
749         switch (arg->type) {
750         case LDLM_GL_CALLBACK:
751                 /*
752                  * Update the LVB from disk if the AST failed
753                  * (this is a legal race)
754                  *
755                  * - Glimpse callback of local lock just returns
756                  *   -ELDLM_NO_LOCK_DATA.
757                  * - Glimpse callback of remote lock might return
758                  *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
759                  */
760                 if (unlikely(arg->gl_interpret_reply)) {
761                         rc = arg->gl_interpret_reply(NULL, req, args, rc);
762                 } else if (rc == -ELDLM_NO_LOCK_DATA) {
763                         LDLM_DEBUG(lock,
764                                    "lost race - client has a lock but no inode");
765                         ldlm_lvbo_update(lock->l_resource, lock, NULL, 1);
766                 } else if (rc != 0) {
767                         rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
768                 } else {
769                         rc = ldlm_lvbo_update(lock->l_resource,
770                                               lock, req, 1);
771                 }
772                 break;
773         case LDLM_BL_CALLBACK:
774                 if (rc != 0)
775                         rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
776                 break;
777         case LDLM_CP_CALLBACK:
778                 if (rc != 0)
779                         rc = ldlm_handle_ast_error(lock, req, rc, "completion");
780                 break;
781         default:
782                 LDLM_ERROR(lock, "invalid opcode for lock callback %d",
783                            arg->type);
784                 LBUG();
785         }
786
787         /* release extra reference taken in ldlm_ast_fini() */
788         LDLM_LOCK_RELEASE(lock);
789
790         if (rc == -ERESTART)
791                 atomic_inc(&arg->restart);
792
793         RETURN(0);
794 }
795
796 static void ldlm_update_resend(struct ptlrpc_request *req, void *data)
797 {
798         struct ldlm_cb_async_args *ca = data;
799         struct ldlm_lock *lock = ca->ca_lock;
800
801         ldlm_refresh_waiting_lock(lock, ldlm_bl_timeout(lock));
802 }
803
804 static inline int ldlm_ast_fini(struct ptlrpc_request *req,
805                                 struct ldlm_cb_set_arg *arg,
806                                 struct ldlm_lock *lock,
807                                 int instant_cancel)
808 {
809         int rc = 0;
810
811         ENTRY;
812
813         if (unlikely(instant_cancel)) {
814                 rc = ptl_send_rpc(req, 1);
815                 ptlrpc_req_finished(req);
816                 if (rc == 0)
817                         atomic_inc(&arg->restart);
818         } else {
819                 LDLM_LOCK_GET(lock);
820                 ptlrpc_set_add_req(arg->set, req);
821         }
822
823         RETURN(rc);
824 }
825
826 /**
827  * Check if there are requests in the export request list which prevent
828  * the lock canceling and make these requests high priority ones.
829  */
830 static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
831 {
832         struct ptlrpc_request *req;
833
834         ENTRY;
835
836         if (lock->l_export == NULL) {
837                 LDLM_DEBUG(lock, "client lock: no-op");
838                 RETURN_EXIT;
839         }
840
841         spin_lock(&lock->l_export->exp_rpc_lock);
842         list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
843                             rq_exp_list) {
844                 /*
845                  * Do not process requests that were not yet added to the
846                  * incoming queue or were already removed from it for
847                  * processing. We evaluate ptlrpc_nrs_req_can_move() without
848                  * holding svcpt->scp_req_lock, and then redo the check with
849                  * the lock held once we need to obtain a reliable result.
850                  */
851                 if (ptlrpc_nrs_req_can_move(req) &&
852                     req->rq_ops->hpreq_lock_match &&
853                     req->rq_ops->hpreq_lock_match(req, lock))
854                         ptlrpc_nrs_req_hp_move(req);
855         }
856         spin_unlock(&lock->l_export->exp_rpc_lock);
857         EXIT;
858 }
859
860 /**
861  * ->l_blocking_ast() method for server-side locks. This is invoked when a
862  * newly enqueued server lock conflicts with the given one.
863  *
864  * Sends blocking AST RPC to the client owning that lock; arms timeout timer
865  * to wait for client response.
866  */
867 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
868                              struct ldlm_lock_desc *desc,
869                              void *data, int flag)
870 {
871         struct ldlm_cb_async_args *ca;
872         struct ldlm_cb_set_arg *arg = data;
873         struct ldlm_request *body;
874         struct ptlrpc_request  *req;
875         int instant_cancel = 0;
876         int rc = 0;
877
878         ENTRY;
879
880         if (flag == LDLM_CB_CANCELING)
881                 /* Don't need to do anything here. */
882                 RETURN(0);
883
884         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_SRV_BL_AST)) {
885                 LDLM_DEBUG(lock, "dropping BL AST");
886                 RETURN(0);
887         }
888
889         LASSERT(lock);
890         LASSERT(data != NULL);
891         if (lock->l_export->exp_obd->obd_recovering != 0)
892                 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
893
894         ldlm_lock_reorder_req(lock);
895
896         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
897                                         &RQF_LDLM_BL_CALLBACK,
898                                         LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
899         if (req == NULL)
900                 RETURN(-ENOMEM);
901
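        /*
         * ptlrpc_req_async_args() returns the request's embedded async-args
         * storage; per LU-4423 the macro takes the destination pointer so it
         * can BUILD_BUG_ON() that the struct fits into rq_async_args.
         */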
902         ca = ptlrpc_req_async_args(ca, req);
903         ca->ca_set_arg = arg;
904         ca->ca_lock = lock;
905
906         req->rq_interpret_reply = ldlm_cb_interpret;
907
908         lock_res_and_lock(lock);
909         if (ldlm_is_destroyed(lock)) {
910                 /* What's the point? */
911                 unlock_res_and_lock(lock);
912                 ptlrpc_req_finished(req);
913                 RETURN(0);
914         }
915
916         if (!ldlm_is_granted(lock)) {
917                 /*
918                  * this blocking AST will be communicated as part of the
919                  * completion AST instead
920                  */
921                 ldlm_add_blocked_lock(lock);
922                 ldlm_set_waited(lock);
923                 unlock_res_and_lock(lock);
924
925                 ptlrpc_req_finished(req);
926                 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
927                 RETURN(0);
928         }
929
930         if (ldlm_is_cancel_on_block(lock))
931                 instant_cancel = 1;
932
933         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
934         body->lock_handle[0] = lock->l_remote_handle;
935         body->lock_desc = *desc;
936         body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
937
938         LDLM_DEBUG(lock, "server preparing blocking AST");
939
940         ptlrpc_request_set_replen(req);
941         ldlm_set_cbpending(lock);
942         if (instant_cancel) {
943                 unlock_res_and_lock(lock);
944                 ldlm_lock_cancel(lock);
945
946                 req->rq_no_resend = 1;
947         } else {
948                 LASSERT(ldlm_is_granted(lock));
949                 ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
950                 unlock_res_and_lock(lock);
951
952                 /* Do not resend after lock callback timeout */
953                 req->rq_delay_limit = ldlm_bl_timeout(lock);
954                 req->rq_resend_cb = ldlm_update_resend;
955         }
956
957         req->rq_send_state = LUSTRE_IMP_FULL;
958         /* ptlrpc_request_alloc_pack already set timeout */
959         if (AT_OFF)
960                 req->rq_timeout = ldlm_get_rq_timeout();
961
962         if (lock->l_export && lock->l_export->exp_nid_stats &&
963             lock->l_export->exp_nid_stats->nid_ldlm_stats)
964                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
965                                      LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
966
967         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
968
969         RETURN(rc);
970 }
971
972 /**
973  * ->l_completion_ast callback for a remote lock in server namespace.
974  *
975  *  Sends AST to the client notifying it of lock granting.  If initial
976  *  lock response was not sent yet, instead of sending another RPC, just
977  *  mark the lock as granted and client will understand
978  */
979 int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
980 {
981         struct ldlm_cb_set_arg *arg = data;
982         struct ldlm_request *body;
983         struct ptlrpc_request *req;
984         struct ldlm_cb_async_args *ca;
985         int instant_cancel = 0;
986         int rc = 0;
987         int lvb_len;
988
989         ENTRY;
990
991         LASSERT(lock != NULL);
992         LASSERT(data != NULL);
993
994         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_SRV_CP_AST)) {
995                 LDLM_DEBUG(lock, "dropping CP AST");
996                 RETURN(0);
997         }
998
999         req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
1000                                    &RQF_LDLM_CP_CALLBACK);
1001         if (req == NULL)
1002                 RETURN(-ENOMEM);
1003
1004         /* server namespace, doesn't need lock */
1005         lvb_len = ldlm_lvbo_size(lock);
1006         /*
1007          * LU-3124 & LU-2187: do not return the layout in the completion AST
1008          * because it may deadlock (LU-2187), or the client may not have
1009          * enough space for a large layout. The layout will be returned to
1010          * the client with an extra RPC to fetch xattr.lov.
1011          */
1012         if (ldlm_has_layout(lock))
1013                 lvb_len = 0;
1014
1015         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT, lvb_len);
1016         rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
1017         if (rc) {
1018                 ptlrpc_request_free(req);
1019                 RETURN(rc);
1020         }
1021
1022         ca = ptlrpc_req_async_args(ca, req);
1023         ca->ca_set_arg = arg;
1024         ca->ca_lock = lock;
1025
1026         req->rq_interpret_reply = ldlm_cb_interpret;
1027         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1028
1029         body->lock_handle[0] = lock->l_remote_handle;
1030         body->lock_flags = ldlm_flags_to_wire(flags);
1031         ldlm_lock2desc(lock, &body->lock_desc);
1032         if (lvb_len > 0) {
1033                 void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
1034                 lvb_len = ldlm_lvbo_fill(lock, lvb, &lvb_len);
1035                 if (lvb_len < 0) {
1036                         /*
1037                          * We still need to send the RPC to wake up the blocked
1038                          * enqueue thread on the client.
1039                          *
1040                          * For an old client there is no better way to signal
1041                          * the failure than a zero-sized LVB; the client will
1042                          * then fail out with -EPROTO.
1043                          */
1044                         req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, 0,
1045                                            RCL_CLIENT);
1046                         instant_cancel = 1;
1047                 } else {
1048                         req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, lvb_len,
1049                                            RCL_CLIENT);
1050                 }
1051         }
1052
1053         LDLM_DEBUG(lock, "server preparing completion AST");
1054
1055         ptlrpc_request_set_replen(req);
1056
1057         req->rq_send_state = LUSTRE_IMP_FULL;
1058         /* ptlrpc_request_pack already set timeout */
1059         if (AT_OFF)
1060                 req->rq_timeout = ldlm_get_rq_timeout();
1061
1062         /* We only send real blocking ASTs after the lock is granted */
1063         lock_res_and_lock(lock);
1064         if (ldlm_is_ast_sent(lock)) {
1065                 body->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
1066                 /* Copy AST flags like LDLM_FL_DISCARD_DATA. */
1067                 body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
1068                                                        LDLM_FL_AST_MASK);
1069
1070                 /*
1071                  * We might get here before ldlm_handle_enqueue sets the
1072                  * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
1073                  * on the waiting list, but that is safe: similar code in
1074                  * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
1075                  * which not only cancels the lock but also removes it
1076                  * from the waiting list.
1077                  */
1078                 if (ldlm_is_cancel_on_block(lock)) {
1079                         unlock_res_and_lock(lock);
1080                         ldlm_lock_cancel(lock);
1081
1082                         instant_cancel = 1;
1083                         req->rq_no_resend = 1;
1084
1085                         lock_res_and_lock(lock);
1086                 } else {
1087                         /* start the lock-timeout clock */
1088                         ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
1089                         /* Do not resend after lock callback timeout */
1090                         req->rq_delay_limit = ldlm_bl_timeout(lock);
1091                         req->rq_resend_cb = ldlm_update_resend;
1092                 }
1093         }
1094         unlock_res_and_lock(lock);
1095
1096         if (lock->l_export && lock->l_export->exp_nid_stats &&
1097             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1098                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1099                                      LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
1100
1101         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
1102
1103         RETURN(lvb_len < 0 ? lvb_len : rc);
1104 }
1105
1106 /**
1107  * Server side ->l_glimpse_ast handler for client locks.
1108  *
1109  * Sends glimpse AST to the client and waits for reply. Then updates
1110  * lvbo with the result.
1111  */
1112 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
1113 {
1114         struct ldlm_cb_set_arg *arg = data;
1115         struct ldlm_request *body;
1116         struct ptlrpc_request *req;
1117         struct ldlm_cb_async_args *ca;
1118         int rc;
1119         struct req_format *req_fmt;
1120
1121         ENTRY;
1122
1123         LASSERT(lock != NULL);
1124
1125         if (arg->gl_desc != NULL)
1126                 /* There is a glimpse descriptor to pack */
1127                 req_fmt = &RQF_LDLM_GL_CALLBACK_DESC;
1128         else
1129                 req_fmt = &RQF_LDLM_GL_CALLBACK;
1130
1131         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
1132                                         req_fmt, LUSTRE_DLM_VERSION,
1133                                         LDLM_GL_CALLBACK);
1134
1135         if (req == NULL)
1136                 RETURN(-ENOMEM);
1137
1138         if (arg->gl_desc != NULL) {
1139                 /* copy the GL descriptor */
1140                 union ldlm_gl_desc      *desc;
1141
1142                 desc = req_capsule_client_get(&req->rq_pill, &RMF_DLM_GL_DESC);
1143                 *desc = *arg->gl_desc;
1144         }
1145
1146         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1147         body->lock_handle[0] = lock->l_remote_handle;
1148         ldlm_lock2desc(lock, &body->lock_desc);
1149
1150         ca = ptlrpc_req_async_args(ca, req);
1151         ca->ca_set_arg = arg;
1152         ca->ca_lock = lock;
1153
1154         /* server namespace, doesn't need lock */
1155         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1156                              ldlm_lvbo_size(lock));
1157         ptlrpc_request_set_replen(req);
1158
1159         req->rq_send_state = LUSTRE_IMP_FULL;
1160         /* ptlrpc_request_alloc_pack already set timeout */
1161         if (AT_OFF)
1162                 req->rq_timeout = ldlm_get_rq_timeout();
1163
1164         req->rq_interpret_reply = ldlm_cb_interpret;
1165
1166         if (lock->l_export && lock->l_export->exp_nid_stats) {
1167                 struct nid_stat *nid_stats = lock->l_export->exp_nid_stats;
1168
1169                 lprocfs_counter_incr(nid_stats->nid_ldlm_stats,
1170                                      LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
1171         }
1172
1173         rc = ldlm_ast_fini(req, arg, lock, 0);
1174
1175         RETURN(rc);
1176 }
1177 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
1178
1179 int ldlm_glimpse_locks(struct ldlm_resource *res,
1180                        struct list_head *gl_work_list)
1181 {
1182         int rc;
1183
1184         ENTRY;
1185
1186         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
1187                                LDLM_WORK_GL_AST);
1188         if (rc == -ERESTART)
1189                 ldlm_reprocess_all(res, NULL);
1190
1191         RETURN(rc);
1192 }
1193 EXPORT_SYMBOL(ldlm_glimpse_locks);
1194
1195 /* return LDLM lock associated with a lock callback request */
1196 struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req)
1197 {
1198         struct ldlm_cb_async_args *ca;
1199         struct ldlm_lock *lock;
1200
1201         ENTRY;
1202
1203         ca = ptlrpc_req_async_args(ca, req);
1204         lock = ca->ca_lock;
1205         if (lock == NULL)
1206                 RETURN(ERR_PTR(-EFAULT));
1207
1208         RETURN(lock);
1209 }
1210 EXPORT_SYMBOL(ldlm_request_lock);
1211
1212 /**
1213  * Main server-side entry point into LDLM for enqueue. This is called by ptlrpc
1214  * service threads to carry out client lock enqueueing requests.
1215  */
1216 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
1217                          struct ptlrpc_request *req,
1218                          const struct ldlm_request *dlm_req,
1219                          const struct ldlm_callback_suite *cbs)
1220 {
1221         struct ldlm_reply *dlm_rep;
1222         __u64 flags;
1223         enum ldlm_error err = ELDLM_OK;
1224         struct ldlm_lock *lock = NULL;
1225         void *cookie = NULL;
1226         int rc = 0;
1227         struct ldlm_resource *res = NULL;
1228         const struct lu_env *env = req->rq_svc_thread->t_env;
1229
1230         ENTRY;
1231
1232         LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
1233
1234         ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF, LATF_SKIP);
1235         flags = ldlm_flags_from_wire(dlm_req->lock_flags);
1236
1237         LASSERT(req->rq_export);
1238
1239         /* for intent enqueue the stat will be updated inside intent policy */
1240         if (ptlrpc_req2svc(req)->srv_stats != NULL &&
1241             !(dlm_req->lock_flags & LDLM_FL_HAS_INTENT))
1242                 ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);
1243
1244         if (req->rq_export && req->rq_export->exp_nid_stats &&
1245             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1246                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1247                                      LDLM_ENQUEUE - LDLM_FIRST_OPC);
1248
1249         if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
1250                      dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
1251                 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
1252                           dlm_req->lock_desc.l_resource.lr_type);
1253                 GOTO(out, rc = -EFAULT);
1254         }
1255
1256         if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
1257                      dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
1258                      dlm_req->lock_desc.l_req_mode &
1259                      (dlm_req->lock_desc.l_req_mode-1))) {
1260                 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
1261                           dlm_req->lock_desc.l_req_mode);
1262                 GOTO(out, rc = -EFAULT);
1263         }
1264
1265         if (unlikely((flags & LDLM_FL_REPLAY) ||
1266                      (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))) {
1267                 /* Find an existing lock in the per-export lock hash */
1268                 /*
1269                  * In the function below, .hs_keycmp resolves to
1270                  * ldlm_export_lock_keycmp()
1271                  */
1272                 /* coverity[overrun-buffer-val] */
1273                 lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
1274                                        (void *)&dlm_req->lock_handle[0]);
1275                 if (lock != NULL) {
1276                         DEBUG_REQ(D_DLMTRACE, req,
1277                                   "found existing lock cookie %#llx",
1278                                   lock->l_handle.h_cookie);
1279                         flags |= LDLM_FL_RESENT;
1280                         GOTO(existing_lock, rc = 0);
1281                 }
1282         } else {
1283                 if (ldlm_reclaim_full()) {
1284                         DEBUG_REQ(D_DLMTRACE, req,
1285                                   "Too many granted locks, reject current enqueue request and let the client retry later.\n");
1286                         GOTO(out, rc = -EINPROGRESS);
1287                 }
1288         }
1289
1290         /* The lock's callback data might be set in the policy function */
1291         lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
1292                                 dlm_req->lock_desc.l_resource.lr_type,
1293                                 dlm_req->lock_desc.l_req_mode,
1294                                 cbs, NULL, 0, LVB_T_NONE);
1295         if (IS_ERR(lock)) {
1296                 rc = PTR_ERR(lock);
1297                 lock = NULL;
1298                 GOTO(out, rc);
1299         }
1300
1301         lock->l_remote_handle = dlm_req->lock_handle[0];
1302         LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
1303
1304         /*
1305          * Initialize the resource LVB, but not for a lock being replayed,
1306          * since the client was already sent the LVB in that case.
1307          * This must occur early since some policy methods assume resource
1308          * lvb is available (lr_lvb_data != NULL).
1309          */
1310         res = lock->l_resource;
1311         if (!(flags & LDLM_FL_REPLAY)) {
1312                 /* non-replayed lock, delayed lvb init may need to be done */
1313                 rc = ldlm_lvbo_init(res);
1314                 if (rc < 0) {
1315                         LDLM_DEBUG(lock, "delayed lvb init failed (rc %d)", rc);
1316                         GOTO(out, rc);
1317                 }
1318         }
1319
1320         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
1321         /*
1322          * Don't enqueue a lock onto the export if it has been disconnected
1323          * due to eviction (b=3822) or server umount (b=24324).
1324          * Cancel it now instead.
1325          */
1326         if (req->rq_export->exp_disconnected) {
1327                 LDLM_ERROR(lock, "lock on disconnected export %p",
1328                            req->rq_export);
1329                 GOTO(out, rc = -ENOTCONN);
1330         }
1331
1332         lock->l_export = class_export_lock_get(req->rq_export, lock);
1333         if (lock->l_export->exp_lock_hash)
1334                 cfs_hash_add(lock->l_export->exp_lock_hash,
1335                              &lock->l_remote_handle,
1336                              &lock->l_exp_hash);
1337
1338         /*
1339          * Inherit the enqueue flags before the operation, because we do not
1340          * keep the res lock on return and next operations (BL AST) may proceed
1341          * without them.
1342          */
1343         lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
1344                                               LDLM_FL_INHERIT_MASK);
1345
1346         ldlm_convert_policy_to_local(req->rq_export,
1347                                      dlm_req->lock_desc.l_resource.lr_type,
1348                                      &dlm_req->lock_desc.l_policy_data,
1349                                      &lock->l_policy_data);
1350         if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
1351                 lock->l_req_extent = lock->l_policy_data.l_extent;
1352
1353 existing_lock:
1354         if (flags & LDLM_FL_HAS_INTENT) {
1355                 /*
1356                  * In this case, the reply buffer is allocated deep in
1357                  * local_lock_enqueue by the policy function.
1358                  */
1359                 cookie = req;
1360         } else {
1361                 /*
1362                  * based on the assumption that the LVB size never changes
1363                  * during the resource lifetime; otherwise it would need
1364                  * resource->lr_lock protection
1365                  */
1366                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
1367                                      RCL_SERVER, ldlm_lvbo_size(lock));
1368
1369                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
1370                         GOTO(out, rc = -ENOMEM);
1371
1372                 rc = req_capsule_server_pack(&req->rq_pill);
1373                 if (rc)
1374                         GOTO(out, rc);
1375         }
1376
1377         err = ldlm_lock_enqueue(env, ns, &lock, cookie, &flags);
1378         if (err) {
1379                 if ((int)err < 0)
1380                         rc = (int)err;
1381                 GOTO(out, err);
1382         }
1383
1384         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1385
1386         ldlm_lock2desc(lock, &dlm_rep->lock_desc);
1387         ldlm_lock2handle(lock, &dlm_rep->lock_handle);
1388
1389         if (lock && lock->l_resource->lr_type == LDLM_EXTENT)
1390                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_BL_EVICT, 6);
1391
1392         /*
1393          * We never send a blocking AST until the lock is granted, but
1394          * we can tell it right now
1395          */
1396         lock_res_and_lock(lock);
1397
1398         /*
1399          * Now take into account flags to be inherited from original lock
1400          * request both in reply to client and in our own lock flags.
1401          */
1402         dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
1403         lock->l_flags |= flags & LDLM_FL_INHERIT_MASK;
1404
1405         /*
1406          * Don't move a pending lock onto the export if it has already been
1407          * disconnected due to eviction (b=5683) or server umount (b=24324).
1408          * Cancel it now instead.
1409          */
1410         if (unlikely(req->rq_export->exp_disconnected ||
1411                      OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
1412                 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
1413                 rc = -ENOTCONN;
1414         } else if (ldlm_is_ast_sent(lock)) {
1415                 /* fill lock desc for possible lock convert */
1416                 if (lock->l_blocking_lock &&
1417                     lock->l_resource->lr_type == LDLM_IBITS) {
1418                         struct ldlm_lock *bl_lock = lock->l_blocking_lock;
1419                         struct ldlm_lock_desc *rep_desc = &dlm_rep->lock_desc;
1420
1421                         LDLM_DEBUG(lock,
1422                                    "save blocking bits %llx in granted lock",
1423                                    bl_lock->l_policy_data.l_inodebits.bits);
1424                         /*
1425                          * If the lock is blocked then save the blocking ibits
1426                          * in the returned lock policy for a possible lock
1427                          * convert on the client.
1428                          */
1429                         rep_desc->l_policy_data.l_inodebits.cancel_bits =
1430                                 bl_lock->l_policy_data.l_inodebits.bits;
1431                 }
1432                 dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
1433                 if (ldlm_is_granted(lock)) {
1434                         /*
1435                          * Only cancel the lock if it was granted; cancelling a
1436                          * not-yet-granted lock here would destroy it immediately
1437                          * and it would never be granted, causing timeouts on the
1438                          * client.  A not-yet-granted lock will instead be
1439                          * cancelled immediately after the completion AST is sent.
1440                          */
1441                         if (ldlm_is_cancel_on_block(lock)) {
1442                                 unlock_res_and_lock(lock);
1443                                 ldlm_lock_cancel(lock);
1444                                 lock_res_and_lock(lock);
1445                         } else {
1446                                 ldlm_add_waiting_lock(lock,
1447                                                       ldlm_bl_timeout(lock));
1448                         }
1449                 }
1450         }
1451         unlock_res_and_lock(lock);
1452
1453         EXIT;
1454 out:
1455         req->rq_status = rc ?: err; /* return either error - b=11190 */
1456         if (!req->rq_packed_final) {
1457                 err = lustre_pack_reply(req, 1, NULL, NULL);
1458                 if (rc == 0)
1459                         rc = err;
1460         }
1461
1462         /*
1463          * The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1464          * ldlm_reprocess_all.  If this moves, revisit that code. -phil
1465          */
1466         if (lock != NULL) {
1467                 LDLM_DEBUG(lock,
1468                            "server-side enqueue handler, sending reply (err=%d, rc=%d)",
1469                            err, rc);
1470
1471                 if (rc == 0 &&
1472                     req_capsule_has_field(&req->rq_pill, &RMF_DLM_LVB,
1473                                           RCL_SERVER) &&
1474                     ldlm_lvbo_size(lock) > 0) {
1475                         void *buf;
1476                         int buflen;
1477
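                        /*
                         * Fill the LVB into the reply buffer: on success the
                         * buffer is shrunk to the bytes actually written; if
                         * it turns out to be too small (-ERANGE), it is grown
                         * and the fill is retried.
                         */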
1478 retry:
1479                         buf = req_capsule_server_get(&req->rq_pill,
1480                                                      &RMF_DLM_LVB);
1481                         LASSERTF(buf != NULL, "req %p, lock %p\n", req, lock);
1482                         buflen = req_capsule_get_size(&req->rq_pill,
1483                                         &RMF_DLM_LVB, RCL_SERVER);
1484                         /*
1485                          * non-replayed lock, delayed lvb init may
1486                          * need to occur now
1487                          */
1488                         if ((buflen > 0) && !(flags & LDLM_FL_REPLAY)) {
1489                                 int rc2;
1490
1491                                 rc2 = ldlm_lvbo_fill(lock, buf, &buflen);
1492                                 if (rc2 >= 0) {
1493                                         req_capsule_shrink(&req->rq_pill,
1494                                                            &RMF_DLM_LVB,
1495                                                            rc2, RCL_SERVER);
1496                                 } else if (rc2 == -ERANGE) {
1497                                         rc2 = req_capsule_server_grow(
1498                                                         &req->rq_pill,
1499                                                         &RMF_DLM_LVB, buflen);
1500                                         if (!rc2) {
1501                                                 goto retry;
1502                                         } else {
1503                                                 /*
1504                                                  * if we can't grow the buffer,
1505                                                  * it's ok to return empty lvb
1506                                                  * to client.
1507                                                  */
1508                                                 req_capsule_shrink(
1509                                                         &req->rq_pill,
1510                                                         &RMF_DLM_LVB, 0,
1511                                                         RCL_SERVER);
1512                                         }
1513                                 } else {
1514                                         rc = rc2;
1515                                 }
1516                         } else if (flags & LDLM_FL_REPLAY) {
1517                                 /* no LVB resend upon replay */
1518                                 if (buflen > 0)
1519                                         req_capsule_shrink(&req->rq_pill,
1520                                                            &RMF_DLM_LVB,
1521                                                            0, RCL_SERVER);
1522                                 else
1523                                         rc = buflen;
1524                         } else {
1525                                 rc = buflen;
1526                         }
1527                 }
1528
1529                 if (rc != 0 && !(flags & LDLM_FL_RESENT)) {
1530                         if (lock->l_export) {
1531                                 ldlm_lock_cancel(lock);
1532                         } else {
1533                                 lock_res_and_lock(lock);
1534                                 ldlm_resource_unlink_lock(lock);
1535                                 ldlm_lock_destroy_nolock(lock);
1536                                 unlock_res_and_lock(lock);
1537
1538                         }
1539                 }
1540
1541                 if (!err && !ldlm_is_cbpending(lock) &&
1542                     dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1543                         ldlm_reprocess_all(lock->l_resource, lock);
1544
1545                 LDLM_LOCK_RELEASE(lock);
1546         }
1547
1548         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1549                           lock, rc);
1550
1551         return rc;
1552 }
1553
1554 /*
1555  * Clear the blocking lock. A race is possible between ldlm_handle_convert0()
1556  * and ldlm_work_bl_ast_lock(), so this is done under the lock with a NULL check.
1557  */
1558 void ldlm_clear_blocking_lock(struct ldlm_lock *lock)
1559 {
1560         if (lock->l_blocking_lock) {
1561                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
1562                 lock->l_blocking_lock = NULL;
1563         }
1564 }
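
/*
 * A minimal usage sketch (illustrative): callers serialize against the race
 * above by holding the lock via lock_res_and_lock(), as ldlm_handle_convert0()
 * does when it calls ldlm_clear_blocking_data():
 *
 *	lock_res_and_lock(lock);
 *	ldlm_clear_blocking_lock(lock);
 *	unlock_res_and_lock(lock);
 */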
1565
1566 /* A lock can be converted to new ibits or mode and should be considered
1567  * as new lock. Clear all states related to a previous blocking AST
1568  * processing so new conflicts will cause new blocking ASTs.
1569  *
1570  * This is used during lock convert below and lock downgrade to COS mode in
1571  * ldlm_lock_mode_downgrade().
1572  */
1573 void ldlm_clear_blocking_data(struct ldlm_lock *lock)
1574 {
1575         ldlm_clear_ast_sent(lock);
1576         lock->l_bl_ast_run = 0;
1577         ldlm_clear_blocking_lock(lock);
1578 }
1579
1580 /**
1581  * Main LDLM entry point for server code to process lock conversion requests.
1582  */
1583 int ldlm_handle_convert0(struct ptlrpc_request *req,
1584                          const struct ldlm_request *dlm_req)
1585 {
1586         struct obd_export *exp = req->rq_export;
1587         struct ldlm_reply *dlm_rep;
1588         struct ldlm_lock *lock;
1589         int rc;
1590
1591         ENTRY;
1592
1593         if (exp && exp->exp_nid_stats && exp->exp_nid_stats->nid_ldlm_stats)
1594                 lprocfs_counter_incr(exp->exp_nid_stats->nid_ldlm_stats,
1595                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1596
1597         rc = req_capsule_server_pack(&req->rq_pill);
1598         if (rc)
1599                 RETURN(rc);
1600
1601         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1602         dlm_rep->lock_flags = dlm_req->lock_flags;
1603
1604         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1605         if (lock) {
1606                 __u64 bits;
1607                 __u64 new;
1608
1609                 bits = lock->l_policy_data.l_inodebits.bits;
1610                 new = dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1611                 LDLM_DEBUG(lock, "server-side convert handler START");
1612
1613                 if (ldlm_is_cancel(lock)) {
1614                         LDLM_ERROR(lock, "convert on canceled lock!");
1615                         rc = ELDLM_NO_LOCK_DATA;
1616                 } else if (dlm_req->lock_desc.l_req_mode !=
1617                            lock->l_granted_mode) {
1618                         LDLM_ERROR(lock, "lock mode differs!");
1619                         rc = ELDLM_NO_LOCK_DATA;
1620                 } else if (bits == new) {
1621                         /*
1622                          * This can be a valid situation if CONVERT RPCs are
1623                          * re-ordered. Just finish silently.
1624                          */
1625                         LDLM_DEBUG(lock, "lock is converted already!");
1626                         rc = ELDLM_OK;
1627                 } else {
1628                         lock_res_and_lock(lock);
1629                         if (ldlm_is_waited(lock))
1630                                 ldlm_del_waiting_lock(lock);
1631
1632                         ldlm_clear_cbpending(lock);
1633                         lock->l_policy_data.l_inodebits.cancel_bits = 0;
1634                         ldlm_inodebits_drop(lock, bits & ~new);
1635
1636                         ldlm_clear_blocking_data(lock);
1637                         unlock_res_and_lock(lock);
1638
1639                         ldlm_reprocess_all(lock->l_resource, NULL);
1640                         rc = ELDLM_OK;
1641                 }
1642
1643                 if (rc == ELDLM_OK) {
1644                         dlm_rep->lock_handle = lock->l_remote_handle;
1645                         ldlm_ibits_policy_local_to_wire(&lock->l_policy_data,
1646                                         &dlm_rep->lock_desc.l_policy_data);
1647                 }
1648
1649                 LDLM_DEBUG(lock, "server-side convert handler END, rc = %d",
1650                            rc);
1651                 LDLM_LOCK_PUT(lock);
1652         } else {
1653                 rc = ELDLM_NO_LOCK_DATA;
1654                 LDLM_DEBUG_NOLOCK("server-side convert handler END, rc = %d",
1655                                   rc);
1656         }
1657
1658         req->rq_status = rc;
1659
1660         RETURN(0);
1661 }
1662
1663 /**
1664  * Cancel all the locks whose handles are packed into ldlm_request
1665  *
1666  * Called by server code expecting such combined cancel activity
1667  * requests.
1668  */
1669 int ldlm_request_cancel(struct ptlrpc_request *req,
1670                         const struct ldlm_request *dlm_req,
1671                         int first, enum lustre_at_flags flags)
1672 {
1673         struct ldlm_resource *res, *pres = NULL;
1674         struct ldlm_lock *lock;
1675         int i, count, done = 0;
1676
1677         ENTRY;
1678
1679         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1680         if (first >= count)
1681                 RETURN(0);
1682
1683         if (count == 1 && dlm_req->lock_handle[0].cookie == 0)
1684                 RETURN(0);
1685
1686         /*
1687          * There is no lock on the server at replay time,
1688          * skip lock cancelling to make replay tests pass.
1689          */
1690         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1691                 RETURN(0);
1692
1693         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, starting at %d",
1694                           count, first);
1695
1696         for (i = first; i < count; i++) {
1697                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1698                 if (!lock) {
1699                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock (cookie %llu)",
1700                                           dlm_req->lock_handle[i].cookie);
1701                         continue;
1702                 }
1703
1704                 res = lock->l_resource;
1705                 done++;
1706
1707                 /*
1708                  * This code is an optimization to only attempt lock
1709                  * granting on the resource (which could be CPU-expensive)
1710                  * after we are done cancelling locks in that resource.
1711                  */
1712                 if (res != pres) {
1713                         if (pres != NULL) {
1714                                 ldlm_reprocess_all(pres, NULL);
1715                                 LDLM_RESOURCE_DELREF(pres);
1716                                 ldlm_resource_putref(pres);
1717                         }
1718                         if (res != NULL) {
1719                                 ldlm_resource_getref(res);
1720                                 LDLM_RESOURCE_ADDREF(res);
1721
1722                                 if (!ldlm_is_discard_data(lock))
1723                                         ldlm_lvbo_update(res, lock,
1724                                                          NULL, 1);
1725                         }
1726                         pres = res;
1727                 }
1728
1729                 if ((flags & LATF_STATS) && ldlm_is_ast_sent(lock) &&
1730                     lock->l_blast_sent != 0) {
1731                         time64_t delay = ktime_get_real_seconds() -
1732                                          lock->l_blast_sent;
1733                         LDLM_DEBUG(lock,
1734                                    "server cancels blocked lock after %llds",
1735                                    (s64)delay);
1736                         at_measured(&lock->l_export->exp_bl_lock_at, delay);
1737                 }
1738                 ldlm_lock_cancel(lock);
1739                 LDLM_LOCK_PUT(lock);
1740         }
1741         if (pres != NULL) {
1742                 ldlm_reprocess_all(pres, NULL);
1743                 LDLM_RESOURCE_DELREF(pres);
1744                 ldlm_resource_putref(pres);
1745         }
1746         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1747         RETURN(done);
1748 }
1749 EXPORT_SYMBOL(ldlm_request_cancel);
1750
1751 /**
1752  * Main LDLM entry point for server code to cancel locks.
1753  *
1754  * Typically gets called from service handler on LDLM_CANCEL opc.
1755  */
1756 int ldlm_handle_cancel(struct ptlrpc_request *req)
1757 {
1758         struct ldlm_request *dlm_req;
1759         int rc;
1760
1761         ENTRY;
1762
1763         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1764         if (dlm_req == NULL) {
1765                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1766                 RETURN(-EFAULT);
1767         }
1768
1769         if (req->rq_export && req->rq_export->exp_nid_stats &&
1770             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1771                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1772                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1773
1774         rc = req_capsule_server_pack(&req->rq_pill);
1775         if (rc)
1776                 RETURN(rc);
1777
1778         if (!ldlm_request_cancel(req, dlm_req, 0, LATF_STATS))
1779                 req->rq_status = LUSTRE_ESTALE;
1780
1781         RETURN(ptlrpc_reply(req));
1782 }
1783 #endif /* HAVE_SERVER_SUPPORT */
1784
1785 /**
1786  * Callback handler for receiving incoming blocking ASTs.
1787  *
1788  * This can only happen on client side.
1789  */
1790 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1791                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1792 {
1793         int do_ast;
1794
1795         ENTRY;
1796
1797         LDLM_DEBUG(lock, "client blocking AST callback handler");
1798
1799         lock_res_and_lock(lock);
1800
1801         /* set bits to cancel for this lock for possible lock convert */
1802         if (ns_is_client(ns) && (lock->l_resource->lr_type == LDLM_IBITS)) {
1803                 /*
1804                  * The lock description contains the policy of the blocking
1805                  * lock, and its cancel_bits is used to pass the conflicting bits.
1806                  * NOTE: ld can be NULL, or non-NULL but zeroed, if it was passed
1807                  * from ldlm_bl_thread_blwi(); the check below on the bits in ld
1808                  * makes sure it is a valid description.
1809                  *
1810                  * The server may replace the lock resource while keeping the same
1811                  * cookie; never use cancel bits from a different resource, a full
1812                  * cancel is to be used instead.
1813                  */
1814                 if (ld && ld->l_policy_data.l_inodebits.bits &&
1815                     ldlm_res_eq(&ld->l_resource.lr_name,
1816                                 &lock->l_resource->lr_name))
1817                         lock->l_policy_data.l_inodebits.cancel_bits =
1818                                 ld->l_policy_data.l_inodebits.cancel_bits;
1819                 /*
1820                  * If there is no valid ld and the lock is cbpending already,
1821                  * then cancel_bits should be kept; otherwise it is zeroed.
1822                  */
1823                 else if (!ldlm_is_cbpending(lock))
1824                         lock->l_policy_data.l_inodebits.cancel_bits = 0;
1825         }
1826         ldlm_set_cbpending(lock);
1827
1828         do_ast = (!lock->l_readers && !lock->l_writers);
1829         unlock_res_and_lock(lock);
1830
1831         if (do_ast) {
1832                 CDEBUG(D_DLMTRACE,
1833                        "Lock %p already unused, calling callback (%p)\n",
1834                        lock, lock->l_blocking_ast);
1835                 if (lock->l_blocking_ast != NULL)
1836                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1837                                              LDLM_CB_BLOCKING);
1838         } else {
1839                 CDEBUG(D_DLMTRACE,
1840                        "Lock %p is referenced, will be cancelled later\n",
1841                        lock);
1842         }
1843
1844         LDLM_DEBUG(lock, "client blocking callback handler END");
1845         LDLM_LOCK_RELEASE(lock);
1846         EXIT;
1847 }
1848
1849 /**
1850  * Callback handler for receiving incoming completion ASTs.
1851  *
1852  * This can only happen on the client side.
1853  */
1854 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1855                                     struct ldlm_namespace *ns,
1856                                     struct ldlm_request *dlm_req,
1857                                     struct ldlm_lock *lock)
1858 {
1859         struct list_head ast_list;
1860         int lvb_len;
1861         int rc = 0;
1862
1863         ENTRY;
1864
1865         LDLM_DEBUG(lock, "client completion callback handler START");
1866
1867         INIT_LIST_HEAD(&ast_list);
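        /*
         * Fault injection: under OBD_FAIL_LDLM_CANCEL_BL_CB_RACE, sleep in
         * 1-second slices until the lock is granted or destroyed, giving the
         * forced cancel/blocking-callback race (see ldlm_callback_handler())
         * time to play out before this completion AST is handled.
         */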
1868         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1869                 long to = cfs_time_seconds(1);
1870
1871                 while (to > 0) {
1872                         set_current_state(TASK_INTERRUPTIBLE);
1873                         schedule_timeout(to);
1874                         if (ldlm_is_granted(lock) ||
1875                             ldlm_is_destroyed(lock))
1876                                 break;
1877                 }
1878         }
1879
1880         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1881         if (lvb_len < 0) {
1882                 LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
1883                 GOTO(out, rc = lvb_len);
1884         } else if (lvb_len > 0) {
1885                 if (lock->l_lvb_len > 0) {
1886                         /* for extent lock, lvb contains ost_lvb{}. */
1887                         LASSERT(lock->l_lvb_data != NULL);
1888
1889                         if (unlikely(lock->l_lvb_len < lvb_len)) {
1890                                 LDLM_ERROR(lock,
1891                                            "Replied LVB is larger than expectation, expected = %d, replied = %d",
1892                                            lock->l_lvb_len, lvb_len);
1893                                 GOTO(out, rc = -EINVAL);
1894                         }
1895                 }
1896         }
1897
1898         lock_res_and_lock(lock);
1899
1900         if (!ldlm_res_eq(&dlm_req->lock_desc.l_resource.lr_name,
1901                          &lock->l_resource->lr_name)) {
1902                 ldlm_resource_unlink_lock(lock);
1903                 unlock_res_and_lock(lock);
1904                 rc = ldlm_lock_change_resource(ns, lock,
1905                                 &dlm_req->lock_desc.l_resource.lr_name);
1906                 if (rc < 0) {
1907                         LDLM_ERROR(lock, "Failed to allocate resource");
1908                         GOTO(out, rc);
1909                 }
1910                 LDLM_DEBUG(lock, "completion AST, new resource");
1911                 lock_res_and_lock(lock);
1912         }
1913
1914         if (ldlm_is_destroyed(lock) ||
1915             ldlm_is_granted(lock)) {
1916                 /* b=11300: the lock has already been granted */
1917                 unlock_res_and_lock(lock);
1918                 LDLM_DEBUG(lock, "Double grant race happened");
1919                 GOTO(out, rc = 0);
1920         }
1921
1922         /*
1923          * If we receive the completion AST before the actual enqueue returned,
1924          * then we might need to switch lock modes, resources, or extents.
1925          */
1926         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1927                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1928                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1929         }
1930
1931         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1932                 ldlm_convert_policy_to_local(req->rq_export,
1933                                           dlm_req->lock_desc.l_resource.lr_type,
1934                                           &dlm_req->lock_desc.l_policy_data,
1935                                           &lock->l_policy_data);
1936                 LDLM_DEBUG(lock, "completion AST, new policy data");
1937         }
1938
1939         ldlm_resource_unlink_lock(lock);
1940
1941         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1942                 /*
1943                  * BL_AST locks are not needed in LRU.
1944                  * Let ldlm_cancel_lru() be fast.
1945                  */
1946                 ldlm_lock_remove_from_lru(lock);
1947                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1948                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1949         }
1950
1951         if (lock->l_lvb_len > 0) {
1952                 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
1953                                    lock->l_lvb_data, lvb_len);
1954                 if (rc < 0) {
1955                         unlock_res_and_lock(lock);
1956                         GOTO(out, rc);
1957                 }
1958         }
1959
1960         ldlm_grant_lock(lock, &ast_list);
1961         unlock_res_and_lock(lock);
1962
1963         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1964
1965         /*
1966          * Let Enqueue call osc_lock_upcall() and initialize
1967          * l_ast_data
1968          */
1969         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1970
1971         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1972
1973         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1974                           lock);
1975         GOTO(out, rc);
1976
1977 out:
1978         if (rc < 0) {
1979                 lock_res_and_lock(lock);
1980                 ldlm_set_failed(lock);
1981                 unlock_res_and_lock(lock);
1982                 wake_up(&lock->l_waitq);
1983         }
1984         LDLM_LOCK_RELEASE(lock);
1985 }
1986
1987 /**
1988  * Callback handler for receiving incoming glimpse ASTs.
1989  *
1990  * This can only happen on the client side.  After handling the glimpse AST
1991  * we also consider dropping the lock here if it is unused locally for a
1992  * long time.
1993  */
1994 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1995                                     struct ldlm_namespace *ns,
1996                                     struct ldlm_request *dlm_req,
1997                                     struct ldlm_lock *lock)
1998 {
1999         int rc = -ENOSYS;
2000
2001         ENTRY;
2002
2003         LDLM_DEBUG(lock, "client glimpse AST callback handler");
2004
2005         if (lock->l_glimpse_ast != NULL)
2006                 rc = lock->l_glimpse_ast(lock, req);
2007
2008         if (req->rq_repmsg != NULL) {
2009                 ptlrpc_reply(req);
2010         } else {
2011                 req->rq_status = rc;
2012                 ptlrpc_error(req);
2013         }
2014
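        /*
         * As described above: if this is an unused PW lock (no readers or
         * writers) that has been idle for longer than ns_dirty_age_limit
         * seconds, hand it to the blocking thread for cancellation instead
         * of keeping it cached.
         */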
2015         lock_res_and_lock(lock);
2016         if (lock->l_granted_mode == LCK_PW &&
2017             !lock->l_readers && !lock->l_writers &&
2018             ktime_after(ktime_get(),
2019                         ktime_add(lock->l_last_used,
2020                                   ktime_set(ns->ns_dirty_age_limit, 0)))) {
2021                 unlock_res_and_lock(lock);
2022                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
2023                         ldlm_handle_bl_callback(ns, NULL, lock);
2024
2025                 EXIT;
2026                 return;
2027         }
2028         unlock_res_and_lock(lock);
2029         LDLM_LOCK_RELEASE(lock);
2030         EXIT;
2031 }
2032
2033 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
2034 {
2035         if (req->rq_no_reply)
2036                 return 0;
2037
2038         req->rq_status = rc;
2039         if (!req->rq_packed_final) {
2040                 rc = lustre_pack_reply(req, 1, NULL, NULL);
2041                 if (rc)
2042                         return rc;
2043         }
2044         return ptlrpc_reply(req);
2045 }
2046
2047 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
2048                                enum ldlm_cancel_flags cancel_flags)
2049 {
2050         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
2051
2052         ENTRY;
2053
2054         spin_lock(&blp->blp_lock);
2055         if (blwi->blwi_lock &&
2056             ldlm_is_discard_data(blwi->blwi_lock)) {
2057                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
2058                 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
2059         } else {
2060                 /* other blocking callbacks are added to the regular list */
2061                 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
2062         }
2063         spin_unlock(&blp->blp_lock);
2064
2065         wake_up(&blp->blp_waitq);
2066
2067         /*
2068          * cannot check blwi->blwi_flags as blwi could already be freed in
2069          * LCF_ASYNC mode
2070          */
2071         if (!(cancel_flags & LCF_ASYNC))
2072                 wait_for_completion(&blwi->blwi_comp);
2073
2074         RETURN(0);
2075 }
2076
2077 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
2078                              struct ldlm_namespace *ns,
2079                              struct ldlm_lock_desc *ld,
2080                              struct list_head *cancels, int count,
2081                              struct ldlm_lock *lock,
2082                              enum ldlm_cancel_flags cancel_flags)
2083 {
2084         init_completion(&blwi->blwi_comp);
2085         INIT_LIST_HEAD(&blwi->blwi_head);
2086
2087         if (memory_pressure_get())
2088                 blwi->blwi_mem_pressure = 1;
2089
2090         blwi->blwi_ns = ns;
2091         blwi->blwi_flags = cancel_flags;
2092         if (ld != NULL)
2093                 blwi->blwi_ld = *ld;
2094         if (count) {
2095                 list_add(&blwi->blwi_head, cancels);
2096                 list_del_init(cancels);
2097                 blwi->blwi_count = count;
2098         } else {
2099                 blwi->blwi_lock = lock;
2100         }
2101 }
2102
2103 /**
2104  * Queues a list of locks \a cancels containing \a count locks
2105  * for later processing by a blocking thread.  If \a count is zero,
2106  * then the lock referenced as \a lock is queued instead.
2107  *
2108  * The blocking thread will then call the lock's ->l_blocking_ast callback.
2109  * If list addition fails, an error is returned and the caller is supposed to
2110  * call ->l_blocking_ast itself.
2111  */
2112 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
2113                              struct ldlm_lock_desc *ld,
2114                              struct ldlm_lock *lock,
2115                              struct list_head *cancels, int count,
2116                              enum ldlm_cancel_flags cancel_flags)
2117 {
2118         ENTRY;
2119
2120         if (cancels && count == 0)
2121                 RETURN(0);
2122
2123         if (cancel_flags & LCF_ASYNC) {
2124                 struct ldlm_bl_work_item *blwi;
2125
2126                 OBD_ALLOC(blwi, sizeof(*blwi));
2127                 if (blwi == NULL)
2128                         RETURN(-ENOMEM);
2129                 init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
2130
2131                 RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
2132         } else {
2133                 /*
2134                  * if it is a synchronous call, do minimal memory allocation, as
2135                  * it could be triggered from the kernel shrinker
2136                  */
2137                 struct ldlm_bl_work_item blwi;
2138
2139                 memset(&blwi, 0, sizeof(blwi));
2140                 init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
2141                 RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
2142         }
2143 }
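
/*
 * A minimal caller-side sketch (illustrative): if queueing to the blocking
 * thread fails, the caller runs the blocking callback itself, as
 * ldlm_callback_handler() does:
 *
 *	if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
 *		ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
 */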
2144
2145
2146 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
2147                            struct ldlm_lock *lock)
2148 {
2149         return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
2150 }
2151
2152 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
2153                            struct list_head *cancels, int count,
2154                            enum ldlm_cancel_flags cancel_flags)
2155 {
2156         return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
2157 }
2158
2159 int ldlm_bl_thread_wakeup(void)
2160 {
2161         wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
2162         return 0;
2163 }
2164
2165 /* Setinfo coming from Server (e.g. MDT) to Client (e.g. MDC)! */
2166 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
2167 {
2168         struct obd_device *obd = req->rq_export->exp_obd;
2169         char *key;
2170         void *val;
2171         int keylen, vallen;
2172         int rc = -ENOSYS;
2173
2174         ENTRY;
2175
2176         DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
2177
2178         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
2179
2180         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2181         if (key == NULL) {
2182                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
2183                 RETURN(-EFAULT);
2184         }
2185         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
2186                                       RCL_CLIENT);
2187         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
2188         if (val == NULL) {
2189                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
2190                 RETURN(-EFAULT);
2191         }
2192         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
2193                                       RCL_CLIENT);
2194
2195         /* We are responsible for swabbing contents of val */
2196
2197         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
2198                 /* Pass it on to mdc (the "export" in this case) */
2199                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
2200                                         req->rq_export,
2201                                         sizeof(KEY_HSM_COPYTOOL_SEND),
2202                                         KEY_HSM_COPYTOOL_SEND,
2203                                         vallen, val, NULL);
2204         else
2205                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
2206
2207         return rc;
2208 }
2209
2210 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
2211                                         const char *msg, int rc,
2212                                         const struct lustre_handle *handle)
2213 {
2214         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
2215                   "%s: [nid %s] [rc %d] [lock %#llx]",
2216                   msg, libcfs_id2str(req->rq_peer), rc,
2217                   handle ? handle->cookie : 0);
2218         if (req->rq_no_reply)
2219                 CWARN("No reply was sent, possibly the cause of b=21636.\n");
2220         else if (rc)
2221                 CWARN("Send reply failed, possibly the cause of b=21636.\n");
2222 }
2223
2224 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
2225 static int ldlm_callback_handler(struct ptlrpc_request *req)
2226 {
2227         struct ldlm_namespace *ns;
2228         struct ldlm_request *dlm_req;
2229         struct ldlm_lock *lock;
2230         int rc;
2231
2232         ENTRY;
2233
2234         /*
2235          * Requests arrive in sender's byte order.  The ptlrpc service
2236          * handler has already checked and, if necessary, byte-swapped the
2237          * incoming request message body, but I am responsible for the
2238          * message buffers.
2239          */
2240
2241         /* do nothing for sec context finalize */
2242         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
2243                 RETURN(0);
2244
2245         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2246
2247         if (req->rq_export == NULL) {
2248                 rc = ldlm_callback_reply(req, -ENOTCONN);
2249                 ldlm_callback_errmsg(req, "Operate on unconnected server",
2250                                      rc, NULL);
2251                 RETURN(0);
2252         }
2253
2254         LASSERT(req->rq_export != NULL);
2255         LASSERT(req->rq_export->exp_obd != NULL);
2256
2257         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2258         case LDLM_BL_CALLBACK:
2259                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) {
2260                         if (cfs_fail_err)
2261                                 ldlm_callback_reply(req, -(int)cfs_fail_err);
2262                         RETURN(0);
2263                 }
2264                 break;
2265         case LDLM_CP_CALLBACK:
2266                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
2267                         RETURN(0);
2268                 break;
2269         case LDLM_GL_CALLBACK:
2270                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
2271                         RETURN(0);
2272                 break;
2273         case LDLM_SET_INFO:
2274                 rc = ldlm_handle_setinfo(req);
2275                 ldlm_callback_reply(req, rc);
2276                 RETURN(0);
2277         default:
2278                 CERROR("unknown opcode %u\n",
2279                        lustre_msg_get_opc(req->rq_reqmsg));
2280                 ldlm_callback_reply(req, -EPROTO);
2281                 RETURN(0);
2282         }
2283
2284         ns = req->rq_export->exp_obd->obd_namespace;
2285         LASSERT(ns != NULL);
2286
2287         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2288
2289         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2290         if (dlm_req == NULL) {
2291                 rc = ldlm_callback_reply(req, -EPROTO);
2292                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2293                                      NULL);
2294                 RETURN(0);
2295         }
2296
2297         /*
2298          * Force a known safe race: send a cancel to the server for a lock
2299          * which the server has already started a blocking callback on.
2300          */
2301         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2302             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2303                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
2304                 if (rc < 0)
2305                         CERROR("ldlm_cli_cancel: %d\n", rc);
2306         }
2307
2308         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2309         if (!lock) {
2310                 CDEBUG(D_DLMTRACE,
2311                        "callback on lock %#llx - lock disappeared\n",
2312                        dlm_req->lock_handle[0].cookie);
2313                 rc = ldlm_callback_reply(req, -EINVAL);
2314                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2315                                      &dlm_req->lock_handle[0]);
2316                 RETURN(0);
2317         }
2318
2319         if (ldlm_is_fail_loc(lock) &&
2320             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2321                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2322
2323         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2324         lock_res_and_lock(lock);
2325         lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
2326                                               LDLM_FL_AST_MASK);
2327         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2328                 /*
2329                  * If somebody cancels the lock and the cache is already dropped,
2330                  * or the lock failed before the cp_ast was received on the client,
2331                  * we can tell the server we have no lock. Otherwise, we
2332                  * should send the cancel after dropping the cache.
2333                  */
2334                 if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
2335                      ldlm_is_failed(lock)) {
2336                         LDLM_DEBUG(lock,
2337                                    "callback on lock %llx - lock disappeared",
2338                                    dlm_req->lock_handle[0].cookie);
2339                         unlock_res_and_lock(lock);
2340                         LDLM_LOCK_RELEASE(lock);
2341                         rc = ldlm_callback_reply(req, -EINVAL);
2342                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2343                                              &dlm_req->lock_handle[0]);
2344                         RETURN(0);
2345                 }
2346                 /*
2347                  * BL_AST locks are not needed in LRU.
2348                  * Let ldlm_cancel_lru() be fast.
2349                  */
2350                 ldlm_lock_remove_from_lru(lock);
2351                 ldlm_set_bl_ast(lock);
2352         }
2353         unlock_res_and_lock(lock);
2354
2355         /*
2356          * We want the ost thread to get this reply so that it can respond
2357          * to ost requests (write cache writeback) that might be triggered
2358          * in the callback.
2359          *
2360          * But we'd also like to be able to indicate in the reply that we're
2361          * cancelling right now, because it's unused, or have an intent result
2362          * in the reply, so we might have to push the responsibility for sending
2363          * the reply down into the AST handlers, alas.
2364          */
2365
2366         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2367         case LDLM_BL_CALLBACK:
2368                 CDEBUG(D_INODE, "blocking ast\n");
2369                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2370                 if (!ldlm_is_cancel_on_block(lock)) {
2371                         rc = ldlm_callback_reply(req, 0);
2372                         if (req->rq_no_reply || rc)
2373                                 ldlm_callback_errmsg(req, "Normal process", rc,
2374                                                      &dlm_req->lock_handle[0]);
2375                 }
2376                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2377                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2378                 break;
2379         case LDLM_CP_CALLBACK:
2380                 CDEBUG(D_INODE, "completion ast\n");
2381                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2382                 ldlm_callback_reply(req, 0);
2383                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2384                 break;
2385         case LDLM_GL_CALLBACK:
2386                 CDEBUG(D_INODE, "glimpse ast\n");
2387                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2388                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2389                 break;
2390         default:
2391                 LBUG(); /* checked above */
2392         }
2393
2394         RETURN(0);
2395 }
2396
2397 #ifdef HAVE_SERVER_SUPPORT
2398 /**
2399  * Main handler for canceld thread.
2400  *
2401  * Separated into its own thread to avoid deadlocks.
2402  */
2403 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2404 {
2405         int rc;
2406
2407         ENTRY;
2408
2409         /*
2410          * Requests arrive in sender's byte order.  The ptlrpc service
2411          * handler has already checked and, if necessary, byte-swapped the
2412          * incoming request message body, but I am responsible for the
2413          * message buffers.
2414          */
2415
2416         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2417
2418         if (req->rq_export == NULL) {
2419                 struct ldlm_request *dlm_req;
2420
2421                 CERROR("%s from %s arrived at %llu with bad export cookie %llu\n",
2422                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2423                        libcfs_nid2str(req->rq_peer.nid),
2424                        (unsigned long long)req->rq_arrival_time.tv_sec,
2425                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2426
2427                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2428                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2429                         dlm_req = req_capsule_client_get(&req->rq_pill,
2430                                                          &RMF_DLM_REQ);
2431                         if (dlm_req != NULL)
2432                                 ldlm_lock_dump_handle(D_ERROR,
2433                                                       &dlm_req->lock_handle[0]);
2434                 }
2435                 ldlm_callback_reply(req, -ENOTCONN);
2436                 RETURN(0);
2437         }
2438
2439         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2440         /* XXX FIXME move this back to mds/handler.c, b=249 */
2441         case LDLM_CANCEL:
2442                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2443                 CDEBUG(D_INODE, "cancel\n");
2444                 if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_NET) ||
2445                     CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND) ||
2446                     CFS_FAIL_CHECK(OBD_FAIL_LDLM_BL_EVICT))
2447                         RETURN(0);
2448                 rc = ldlm_handle_cancel(req);
2449                 break;
2450         case LDLM_CONVERT:
2451         {
2452                 struct ldlm_request *dlm_req;
2453
2454                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
2455                 CDEBUG(D_INODE, "convert\n");
2456
2457                 dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2458                 if (dlm_req == NULL) {
2459                         CDEBUG(D_INFO, "bad request buffer for convert\n");
2460                         rc = ldlm_callback_reply(req, -EPROTO);
2461                 } else {
2462                         req->rq_status = ldlm_handle_convert0(req, dlm_req);
2463                         rc = ptlrpc_reply(req);
2464                 }
2465                 break;
2466         }
2467         default:
2468                 CERROR("invalid opcode %d\n",
2469                        lustre_msg_get_opc(req->rq_reqmsg));
2470                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2471                 rc = ldlm_callback_reply(req, -EINVAL);
2472         }
2473
2474         RETURN(rc);
2475 }
2476
2477 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2478                                         struct ldlm_lock *lock)
2479 {
2480         struct ldlm_request *dlm_req;
2481         struct lustre_handle lockh;
2482         int rc = 0;
2483         int i;
2484
2485         ENTRY;
2486
2487         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2488         if (dlm_req == NULL)
2489                 RETURN(0);
2490
2491         ldlm_lock2handle(lock, &lockh);
2492         for (i = 0; i < dlm_req->lock_count; i++) {
2493                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2494                                         &lockh)) {
2495                         DEBUG_REQ(D_RPCTRACE, req,
2496                                   "Prio raised by lock %#llx.", lockh.cookie);
2497                         rc = 1;
2498                         break;
2499                 }
2500         }
2501
2502         RETURN(rc);
2503 }
2504
2505 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2506 {
2507         struct ldlm_request *dlm_req;
2508         int rc = 0;
2509         int i;
2510
2511         ENTRY;
2512
2513         /* no prolong in recovery */
2514         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2515                 RETURN(0);
2516
2517         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2518         if (dlm_req == NULL)
2519                 RETURN(-EFAULT);
2520
2521         for (i = 0; i < dlm_req->lock_count; i++) {
2522                 struct ldlm_lock *lock;
2523
2524                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2525                 if (lock == NULL)
2526                         continue;
2527
2528                 rc = ldlm_is_ast_sent(lock) ? 1 : 0;
2529                 if (rc)
2530                         LDLM_DEBUG(lock, "hpreq cancel/convert lock");
2531                 LDLM_LOCK_PUT(lock);
2532
2533                 if (rc)
2534                         break;
2535         }
2536
2537         RETURN(rc);
2538 }
2539
2540 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2541         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2542         .hpreq_check      = ldlm_cancel_hpreq_check,
2543         .hpreq_fini       = NULL,
2544 };
2545
2546 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2547 {
2548         ENTRY;
2549
2550         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2551
2552         if (req->rq_export == NULL)
2553                 RETURN(0);
2554
2555         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2556                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2557                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2558         } else if (LDLM_CONVERT == lustre_msg_get_opc(req->rq_reqmsg)) {
2559                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
2560                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2561         }
2562         RETURN(0);
2563 }
2564
2565 static int ldlm_revoke_lock_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2566                                struct hlist_node *hnode, void *data)
2567
2568 {
2569         struct list_head *rpc_list = data;
2570         struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
2571
2572         lock_res_and_lock(lock);
2573
2574         if (!ldlm_is_granted(lock)) {
2575                 unlock_res_and_lock(lock);
2576                 return 0;
2577         }
2578
2579         LASSERT(lock->l_resource);
2580         if (lock->l_resource->lr_type != LDLM_IBITS &&
2581             lock->l_resource->lr_type != LDLM_PLAIN) {
2582                 unlock_res_and_lock(lock);
2583                 return 0;
2584         }
2585
2586         if (ldlm_is_ast_sent(lock)) {
2587                 unlock_res_and_lock(lock);
2588                 return 0;
2589         }
2590
2591         LASSERT(lock->l_blocking_ast);
2592         LASSERT(!lock->l_blocking_lock);
2593
2594         ldlm_set_ast_sent(lock);
2595         if (lock->l_export && lock->l_export->exp_lock_hash) {
2596                 /*
2597                  * NB: it's safe to call cfs_hash_del() even if the lock isn't
2598                  * in exp_lock_hash.
2599                  */
2600                 /*
2601                  * In the function below, .hs_keycmp resolves to
2602                  * ldlm_export_lock_keycmp()
2603                  */
2604                 /* coverity[overrun-buffer-val] */
2605                 cfs_hash_del(lock->l_export->exp_lock_hash,
2606                              &lock->l_remote_handle, &lock->l_exp_hash);
2607         }
2608
2609         list_add_tail(&lock->l_rk_ast, rpc_list);
2610         LDLM_LOCK_GET(lock);
2611
2612         unlock_res_and_lock(lock);
2613         return 0;
2614 }
2615
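/**
 * Send blocking (revoke) ASTs for all granted PLAIN and IBITS locks on the
 * given export that do not already have an AST sent.
 *
 * Locks are collected by ldlm_revoke_lock_cb() above and the ASTs are issued
 * as LDLM_WORK_REVOKE_AST work.
 */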
2616 void ldlm_revoke_export_locks(struct obd_export *exp)
2617 {
2618         struct list_head rpc_list;
2619
2620         ENTRY;
2621
2622         INIT_LIST_HEAD(&rpc_list);
2623         cfs_hash_for_each_nolock(exp->exp_lock_hash,
2624                                  ldlm_revoke_lock_cb, &rpc_list, 0);
2625         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2626                           LDLM_WORK_REVOKE_AST);
2627
2628         EXIT;
2629 }
2630 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2631 #endif /* HAVE_SERVER_SUPPORT */
2632
2633 static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
2634                             struct ldlm_bl_work_item **p_blwi,
2635                             struct obd_export **p_exp)
2636 {
2637         struct ldlm_bl_work_item *blwi = NULL;
2638         static unsigned int num_bl;
2639         static unsigned int num_stale;
2640         int num_th = atomic_read(&blp->blp_num_threads);
2641
2642         *p_exp = obd_stale_export_get();
2643
2644         spin_lock(&blp->blp_lock);
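        /*
         * A stale export is handled right away, but after blp_num_threads
         * consecutive stale exports fall through and also try to pick a
         * blocking work item so the blp lists are not starved; if both get
         * picked, the export is released again below.
         */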
2645         if (*p_exp != NULL) {
2646                 if (num_th == 1 || ++num_stale < num_th) {
2647                         spin_unlock(&blp->blp_lock);
2648                         return 1;
2649                 }
2650                 num_stale = 0;
2651         }
2652
2653         /* take from blp_list at least once per blp_num_threads items so blp_prio_list cannot starve it */
2654         if (!list_empty(&blp->blp_list) &&
2655             (list_empty(&blp->blp_prio_list) || num_bl == 0))
2656                 blwi = list_entry(blp->blp_list.next,
2657                                   struct ldlm_bl_work_item, blwi_entry);
2658         else
2659                 if (!list_empty(&blp->blp_prio_list))
2660                         blwi = list_entry(blp->blp_prio_list.next,
2661                                           struct ldlm_bl_work_item,
2662                                           blwi_entry);
2663
2664         if (blwi) {
2665                 if (++num_bl >= num_th)
2666                         num_bl = 0;
2667                 list_del(&blwi->blwi_entry);
2668         }
2669         spin_unlock(&blp->blp_lock);
2670         *p_blwi = blwi;
2671
2672         if (*p_exp != NULL && *p_blwi != NULL) {
2673                 obd_stale_export_put(*p_exp);
2674                 *p_exp = NULL;
2675         }
2676
2677         return (*p_blwi != NULL || *p_exp != NULL) ? 1 : 0;
2678 }
2679
2680 /* This only contains temporary data until the thread starts */
2681 struct ldlm_bl_thread_data {
2682         struct ldlm_bl_pool     *bltd_blp;
2683         struct completion       bltd_comp;
2684         int                     bltd_num;
2685 };
2686
2687 static int ldlm_bl_thread_main(void *arg);
2688
2689 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp, bool check_busy)
2690 {
2691         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2692         struct task_struct *task;
2693
2694         init_completion(&bltd.bltd_comp);
2695
2696         bltd.bltd_num = atomic_inc_return(&blp->blp_num_threads);
2697         if (bltd.bltd_num >= blp->blp_max_threads) {
2698                 atomic_dec(&blp->blp_num_threads);
2699                 return 0;
2700         }
2701
2702         LASSERTF(bltd.bltd_num > 0, "thread num:%d\n", bltd.bltd_num);
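        /*
         * For on-demand creation (check_busy), only add a new thread if all
         * currently running threads are busy.
         */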
2703         if (check_busy &&
2704             atomic_read(&blp->blp_busy_threads) < (bltd.bltd_num - 1)) {
2705                 atomic_dec(&blp->blp_num_threads);
2706                 return 0;
2707         }
2708
2709         task = kthread_run(ldlm_bl_thread_main, &bltd, "ldlm_bl_%02d",
2710                            bltd.bltd_num);
2711         if (IS_ERR(task)) {
2712                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
2713                        bltd.bltd_num, PTR_ERR(task));
2714                 atomic_dec(&blp->blp_num_threads);
2715                 return PTR_ERR(task);
2716         }
2717         wait_for_completion(&bltd.bltd_comp);
2718
2719         return 0;
2720 }
2721
2722 /* Not fatal if racy; ending up with a few too many threads is harmless */
2723 static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp,
2724                                       struct ldlm_bl_work_item *blwi)
2725 {
2726         if (atomic_read(&blp->blp_num_threads) >= blp->blp_max_threads)
2727                 return 0;
2728
2729         if (atomic_read(&blp->blp_busy_threads) <
2730             atomic_read(&blp->blp_num_threads))
2731                 return 0;
2732
2733         if (blwi != NULL && (blwi->blwi_ns == NULL ||
2734                              blwi->blwi_mem_pressure))
2735                 return 0;
2736
2737         return 1;
2738 }
2739
2740 static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
2741                                struct ldlm_bl_work_item *blwi)
2742 {
2743         ENTRY;
2744
2745         if (blwi->blwi_ns == NULL)
2746                 /* added by ldlm_cleanup() */
2747                 RETURN(LDLM_ITER_STOP);
2748
2749         if (blwi->blwi_mem_pressure)
2750                 memory_pressure_set();
2751
2752         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
2753
2754         if (blwi->blwi_count) {
2755                 int count;
2756                 /*
2757                  * In the special case when we cancel locks in the LRU
2758                  * asynchronously, we pass the list of locks here.
2759                  * Thus the locks are marked LDLM_FL_CANCELING, but are NOT
2760                  * cancelled locally yet.
2761                  */
2762                 count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2763                                                    blwi->blwi_count,
2764                                                    LCF_BL_AST);
2765                 ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
2766                                      blwi->blwi_flags);
2767         } else {
2768                 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2769                                         blwi->blwi_lock);
2770         }
2771         if (blwi->blwi_mem_pressure)
2772                 memory_pressure_clr();
2773
2774         if (blwi->blwi_flags & LCF_ASYNC)
2775                 OBD_FREE(blwi, sizeof(*blwi));
2776         else
2777                 complete(&blwi->blwi_comp);
2778
2779         RETURN(0);
2780 }
2781
2782 /**
2783  * Cancel stale locks on an export.  Blocked locks are canceled first.
2784  * If the given export has blocked locks, the next export in the list may
2785  * have them too, so cancel non-blocked locks only if the current export
2786  * has no blocked locks.
2787  */
2788 static int ldlm_bl_thread_exports(struct ldlm_bl_pool *blp,
2789                                   struct obd_export *exp)
2790 {
2791         int num;
2792
2793         ENTRY;
2794
2795         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_BL_EVICT, 4);
2796
2797         num = ldlm_export_cancel_blocked_locks(exp);
2798         if (num == 0)
2799                 ldlm_export_cancel_locks(exp);
2800
2801         obd_stale_export_put(exp);
2802
2803         RETURN(0);
2804 }
2805
2806
2807 /**
2808  * Main blocking-request processing thread.
2809  *
2810  * Callers put locks into its queue by calling ldlm_bl_to_thread().
2811  * This thread ultimately performs the actual call to ->l_blocking_ast
2812  * for each queued lock.
2813  */
2814 static int ldlm_bl_thread_main(void *arg)
2815 {
2816         struct lu_env *env;
2817         struct ldlm_bl_pool *blp;
2818         struct ldlm_bl_thread_data *bltd = arg;
2819         int rc;
2820
2821         ENTRY;
2822
2823         OBD_ALLOC_PTR(env);
2824         if (!env)
2825                 RETURN(-ENOMEM);
2826         rc = lu_env_init(env, LCT_DT_THREAD);
2827         if (rc)
2828                 GOTO(out_env, rc);
2829         rc = lu_env_add(env);
2830         if (rc)
2831                 GOTO(out_env_fini, rc);
2832
2833         blp = bltd->bltd_blp;
2834
2835         complete(&bltd->bltd_comp);
2836         /* bltd must not be used past this point; it lives only on the caller's stack */
2837
2838         while (1) {
2839                 struct l_wait_info lwi = { 0 };
2840                 struct ldlm_bl_work_item *blwi = NULL;
2841                 struct obd_export *exp = NULL;
2842                 int rc;
2843
2844                 rc = ldlm_bl_get_work(blp, &blwi, &exp);
2845
2846                 if (rc == 0)
2847                         l_wait_event_exclusive(blp->blp_waitq,
2848                                                ldlm_bl_get_work(blp, &blwi,
2849                                                                 &exp),
2850                                                &lwi);
2851                 atomic_inc(&blp->blp_busy_threads);
2852
2853                 if (ldlm_bl_thread_need_create(blp, blwi))
2854                         /* discard the return value, we tried */
2855                         ldlm_bl_thread_start(blp, true);
2856
2857                 if (exp)
2858                         rc = ldlm_bl_thread_exports(blp, exp);
2859                 else if (blwi)
2860                         rc = ldlm_bl_thread_blwi(blp, blwi);
2861
2862                 atomic_dec(&blp->blp_busy_threads);
2863
2864                 if (rc == LDLM_ITER_STOP)
2865                         break;
2866
2867                 /*
2868                  * If there are many namespaces, we will not sleep waiting
2869                  * for work, so we must call cond_resched() to avoid holding
2870                  * the CPU for too long.
2871                  */
2872                 cond_resched();
2873         }
2874
2875         atomic_dec(&blp->blp_num_threads);
2876         complete(&blp->blp_comp);
2877
2878         lu_env_remove(env);
2879 out_env_fini:
2880         lu_env_fini(env);
2881 out_env:
2882         OBD_FREE_PTR(env);
2883         RETURN(rc);
2884 }
2885
2886
2887 static int ldlm_setup(void);
2888 static int ldlm_cleanup(void);
2889
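/* Take a reference on the LDLM subsystem, setting it up on first use. */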
2890 int ldlm_get_ref(void)
2891 {
2892         int rc = 0;
2893
2894         ENTRY;
2895         mutex_lock(&ldlm_ref_mutex);
2896         if (++ldlm_refcount == 1) {
2897                 rc = ldlm_setup();
2898                 if (rc)
2899                         ldlm_refcount--;
2900         }
2901         mutex_unlock(&ldlm_ref_mutex);
2902
2903         RETURN(rc);
2904 }
2905
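/* Drop a reference on the LDLM subsystem, tearing it down on the last put. */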
2906 void ldlm_put_ref(void)
2907 {
2908         ENTRY;
2909         mutex_lock(&ldlm_ref_mutex);
2910         if (ldlm_refcount == 1) {
2911                 int rc = ldlm_cleanup();
2912
2913                 if (rc)
2914                         CERROR("ldlm_cleanup failed: %d\n", rc);
2915                 else
2916                         ldlm_refcount--;
2917         } else {
2918                 ldlm_refcount--;
2919         }
2920         mutex_unlock(&ldlm_ref_mutex);
2921
2922         EXIT;
2923 }
2924
2925 /*
2926  * Export handle<->lock hash operations.
2927  */
2928 static unsigned
2929 ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
2930 {
2931         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2932 }
2933
2934 static void *
2935 ldlm_export_lock_key(struct hlist_node *hnode)
2936 {
2937         struct ldlm_lock *lock;
2938
2939         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2940         return &lock->l_remote_handle;
2941 }
2942
2943 static void
2944 ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
2945 {
2946         struct ldlm_lock     *lock;
2947
2948         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2949         lock->l_remote_handle = *(struct lustre_handle *)key;
2950 }
2951
2952 static int
2953 ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
2954 {
2955         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2956 }
2957
2958 static void *
2959 ldlm_export_lock_object(struct hlist_node *hnode)
2960 {
2961         return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2962 }
2963
2964 static void
2965 ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
2966 {
2967         struct ldlm_lock *lock;
2968
2969         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2970         LDLM_LOCK_GET(lock);
2971 }
2972
2973 static void
2974 ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
2975 {
2976         struct ldlm_lock *lock;
2977
2978         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2979         LDLM_LOCK_RELEASE(lock);
2980 }
2981
2982 static struct cfs_hash_ops ldlm_export_lock_ops = {
2983         .hs_hash        = ldlm_export_lock_hash,
2984         .hs_key         = ldlm_export_lock_key,
2985         .hs_keycmp      = ldlm_export_lock_keycmp,
2986         .hs_keycpy      = ldlm_export_lock_keycpy,
2987         .hs_object      = ldlm_export_lock_object,
2988         .hs_get         = ldlm_export_lock_get,
2989         .hs_put         = ldlm_export_lock_put,
2990         .hs_put_locked  = ldlm_export_lock_put,
2991 };
2992
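/*
 * Set up per-export LDLM state: the remote-handle -> lock hash table and the
 * flock export data.
 */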
2993 int ldlm_init_export(struct obd_export *exp)
2994 {
2995         int rc;
2996
2997         ENTRY;
2998
2999         exp->exp_lock_hash =
3000                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
3001                                 HASH_EXP_LOCK_CUR_BITS,
3002                                 HASH_EXP_LOCK_MAX_BITS,
3003                                 HASH_EXP_LOCK_BKT_BITS, 0,
3004                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
3005                                 &ldlm_export_lock_ops,
3006                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
3007                                 CFS_HASH_NBLK_CHANGE);
3008
3009         if (!exp->exp_lock_hash)
3010                 RETURN(-ENOMEM);
3011
3012         rc = ldlm_init_flock_export(exp);
3013         if (rc)
3014                 GOTO(err, rc);
3015
3016         RETURN(0);
3017 err:
3018         ldlm_destroy_export(exp);
3019         RETURN(rc);
3020 }
3021 EXPORT_SYMBOL(ldlm_init_export);
3022
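/* Release the per-export lock hash and flock state set up in ldlm_init_export(). */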
3023 void ldlm_destroy_export(struct obd_export *exp)
3024 {
3025         ENTRY;
3026         cfs_hash_putref(exp->exp_lock_hash);
3027         exp->exp_lock_hash = NULL;
3028
3029         ldlm_destroy_flock_export(exp);
3030         EXIT;
3031 }
3032 EXPORT_SYMBOL(ldlm_destroy_export);
3033
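/*
 * Read-write sysfs attribute under the "ldlm" kobject exposing
 * ldlm_cancel_unused_locks_before_replay, which controls whether unused
 * locks are canceled before lock replay.
 */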
3034 static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
3035                                                       struct attribute *attr,
3036                                                       char *buf)
3037 {
3038         return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
3039 }
3040
3041 static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
3042                                                        struct attribute *attr,
3043                                                        const char *buffer,
3044                                                        size_t count)
3045 {
3046         int rc;
3047         unsigned long val;
3048
3049         rc = kstrtoul(buffer, 10, &val);
3050         if (rc)
3051                 return rc;
3052
3053         ldlm_cancel_unused_locks_before_replay = val;
3054
3055         return count;
3056 }
3057 LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
3058
3059 static struct attribute *ldlm_attrs[] = {
3060         &lustre_attr_cancel_unused_locks_before_replay.attr,
3061         NULL,
3062 };
3063
3064 static struct attribute_group ldlm_attr_group = {
3065         .attrs = ldlm_attrs,
3066 };
3067
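/*
 * Bring up global LDLM state: sysfs/debugfs entries, the "ldlm_cbd" callback
 * service (plus the "ldlm_canceld" service with server support), the
 * blocking-callback thread pool, the expired-lock thread, the lock pools and
 * the lock reclaim machinery.  On any failure, everything set up so far is
 * torn down via ldlm_cleanup().
 */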
3068 static int ldlm_setup(void)
3069 {
3070         static struct ptlrpc_service_conf       conf;
3071         struct ldlm_bl_pool                    *blp = NULL;
3072 #ifdef HAVE_SERVER_SUPPORT
3073         struct task_struct *task;
3074 #endif /* HAVE_SERVER_SUPPORT */
3075         int i;
3076         int rc = 0;
3077
3078         ENTRY;
3079
3080         if (ldlm_state != NULL)
3081                 RETURN(-EALREADY);
3082
3083         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
3084         if (ldlm_state == NULL)
3085                 RETURN(-ENOMEM);
3086
3087         ldlm_kobj = kobject_create_and_add("ldlm", &lustre_kset->kobj);
3088         if (!ldlm_kobj)
3089                 GOTO(out, rc = -ENOMEM);
3090
3091         rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
3092         if (rc)
3093                 GOTO(out, rc);
3094
3095         ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
3096         if (!ldlm_ns_kset)
3097                 GOTO(out, rc = -ENOMEM);
3098
3099         ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
3100         if (!ldlm_svc_kset)
3101                 GOTO(out, rc = -ENOMEM);
3102
3103         rc = ldlm_debugfs_setup();
3104         if (rc != 0)
3105                 GOTO(out, rc);
3106
3107         memset(&conf, 0, sizeof(conf));
3108         conf = (typeof(conf)) {
3109                 .psc_name               = "ldlm_cbd",
3110                 .psc_watchdog_factor    = 2,
3111                 .psc_buf                = {
3112                         .bc_nbufs               = LDLM_CLIENT_NBUFS,
3113                         .bc_buf_size            = LDLM_BUFSIZE,
3114                         .bc_req_max_size        = LDLM_MAXREQSIZE,
3115                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
3116                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
3117                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
3118                 },
3119                 .psc_thr                = {
3120                         .tc_thr_name            = "ldlm_cb",
3121                         .tc_thr_factor          = LDLM_THR_FACTOR,
3122                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
3123                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
3124                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
3125                         .tc_nthrs_user          = ldlm_num_threads,
3126                         .tc_cpu_bind            = ldlm_cpu_bind,
3127                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
3128                 },
3129                 .psc_cpt                = {
3130                         .cc_pattern             = ldlm_cpts,
3131                         .cc_affinity            = true,
3132                 },
3133                 .psc_ops                = {
3134                         .so_req_handler         = ldlm_callback_handler,
3135                 },
3136         };
3137         ldlm_state->ldlm_cb_service =
3138                         ptlrpc_register_service(&conf, ldlm_svc_kset,
3139                                                 ldlm_svc_debugfs_dir);
3140         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
3141                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
3142                 CERROR("failed to start ldlm_cbd service: rc = %d\n", rc);
3143                 ldlm_state->ldlm_cb_service = NULL;
3144                 GOTO(out, rc);
3145         }
3146
3147 #ifdef HAVE_SERVER_SUPPORT
3148         memset(&conf, 0, sizeof(conf));
3149         conf = (typeof(conf)) {
3150                 .psc_name               = "ldlm_canceld",
3151                 .psc_watchdog_factor    = 6,
3152                 .psc_buf                = {
3153                         .bc_nbufs               = LDLM_SERVER_NBUFS,
3154                         .bc_buf_size            = LDLM_BUFSIZE,
3155                         .bc_req_max_size        = LDLM_MAXREQSIZE,
3156                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
3157                         .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
3158                         .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
3159
3160                 },
3161                 .psc_thr                = {
3162                         .tc_thr_name            = "ldlm_cn",
3163                         .tc_thr_factor          = LDLM_THR_FACTOR,
3164                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
3165                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
3166                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
3167                         .tc_nthrs_user          = ldlm_num_threads,
3168                         .tc_cpu_bind            = ldlm_cpu_bind,
3169                         .tc_ctx_tags            = LCT_MD_THREAD |
3170                                                   LCT_DT_THREAD |
3171                                                   LCT_CL_THREAD,
3172                 },
3173                 .psc_cpt                = {
3174                         .cc_pattern             = ldlm_cpts,
3175                         .cc_affinity            = true,
3176                 },
3177                 .psc_ops                = {
3178                         .so_req_handler         = ldlm_cancel_handler,
3179                         .so_hpreq_handler       = ldlm_hpreq_handler,
3180                 },
3181         };
3182         ldlm_state->ldlm_cancel_service =
3183                         ptlrpc_register_service(&conf, ldlm_svc_kset,
3184                                                 ldlm_svc_debugfs_dir);
3185         if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
3186                 rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
3187                 CERROR("failed to start ldlm_canceld service: rc = %d\n", rc);
3188                 ldlm_state->ldlm_cancel_service = NULL;
3189                 GOTO(out, rc);
3190         }
3191 #endif /* HAVE_SERVER_SUPPORT */
3192
3193         OBD_ALLOC(blp, sizeof(*blp));
3194         if (blp == NULL)
3195                 GOTO(out, rc = -ENOMEM);
3196         ldlm_state->ldlm_bl_pool = blp;
3197
3198         spin_lock_init(&blp->blp_lock);
3199         INIT_LIST_HEAD(&blp->blp_list);
3200         INIT_LIST_HEAD(&blp->blp_prio_list);
3201         init_waitqueue_head(&blp->blp_waitq);
3202         atomic_set(&blp->blp_num_threads, 0);
3203         atomic_set(&blp->blp_busy_threads, 0);
3204
3205         if (ldlm_num_threads == 0) {
3206                 blp->blp_min_threads = LDLM_NTHRS_INIT;
3207                 blp->blp_max_threads = LDLM_NTHRS_MAX;
3208         } else {
3209                 blp->blp_min_threads = blp->blp_max_threads =
3210                         min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
3211                                                          ldlm_num_threads));
3212         }
3213
3214         for (i = 0; i < blp->blp_min_threads; i++) {
3215                 rc = ldlm_bl_thread_start(blp, false);
3216                 if (rc < 0)
3217                         GOTO(out, rc);
3218         }
3219
3220 #ifdef HAVE_SERVER_SUPPORT
3221         task = kthread_run(expired_lock_main, NULL, "ldlm_elt");
3222         if (IS_ERR(task)) {
3223                 rc = PTR_ERR(task);
3224                 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
3225                 GOTO(out, rc);
3226         }
3227
3228         wait_event(expired_lock_wait_queue,
3229                    expired_lock_thread_state == ELT_READY);
3230 #endif /* HAVE_SERVER_SUPPORT */
3231
3232         rc = ldlm_pools_init();
3233         if (rc) {
3234                 CERROR("Failed to initialize LDLM pools: %d\n", rc);
3235                 GOTO(out, rc);
3236         }
3237
3238         rc = ldlm_reclaim_setup();
3239         if (rc) {
3240                 CERROR("Failed to setup reclaim thread: rc = %d\n", rc);
3241                 GOTO(out, rc);
3242         }
3243         RETURN(0);
3244
3245  out:
3246         ldlm_cleanup();
3247         RETURN(rc);
3248 }
3249
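/*
 * Tear down everything created by ldlm_setup().  Fails with -EBUSY if any
 * server or client namespaces still exist; the blocking-callback threads are
 * stopped by queueing one dummy work item (blwi_ns == NULL) per thread.
 */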
3250 static int ldlm_cleanup(void)
3251 {
3252         ENTRY;
3253
3254         if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
3255             !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
3256                 CERROR("ldlm still has namespaces; clean these up first.\n");
3257                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
3258                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
3259                 RETURN(-EBUSY);
3260         }
3261
3262         ldlm_reclaim_cleanup();
3263         ldlm_pools_fini();
3264
3265         if (ldlm_state->ldlm_bl_pool != NULL) {
3266                 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
3267
3268                 while (atomic_read(&blp->blp_num_threads) > 0) {
3269                         struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
3270
3271                         init_completion(&blp->blp_comp);
3272
3273                         spin_lock(&blp->blp_lock);
3274                         list_add_tail(&blwi.blwi_entry, &blp->blp_list);
3275                         wake_up(&blp->blp_waitq);
3276                         spin_unlock(&blp->blp_lock);
3277
3278                         wait_for_completion(&blp->blp_comp);
3279                 }
3280
3281                 OBD_FREE(blp, sizeof(*blp));
3282         }
3283
3284         if (ldlm_state->ldlm_cb_service != NULL)
3285                 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
3286 #ifdef HAVE_SERVER_SUPPORT
3287         if (ldlm_state->ldlm_cancel_service != NULL)
3288                 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
3289 #endif
3290
3291         if (ldlm_ns_kset)
3292                 kset_unregister(ldlm_ns_kset);
3293         if (ldlm_svc_kset)
3294                 kset_unregister(ldlm_svc_kset);
3295         if (ldlm_kobj) {
3296                 sysfs_remove_group(ldlm_kobj, &ldlm_attr_group);
3297                 kobject_put(ldlm_kobj);
3298         }
3299
3300         ldlm_debugfs_cleanup();
3301
3302 #ifdef HAVE_SERVER_SUPPORT
3303         if (expired_lock_thread_state != ELT_STOPPED) {
3304                 expired_lock_thread_state = ELT_TERMINATE;
3305                 wake_up(&expired_lock_wait_queue);
3306                 wait_event(expired_lock_wait_queue,
3307                            expired_lock_thread_state == ELT_STOPPED);
3308         }
3309 #endif
3310
3311         OBD_FREE(ldlm_state, sizeof(*ldlm_state));
3312         ldlm_state = NULL;
3313
3314         RETURN(0);
3315 }
3316
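/* Create the slab caches used by the LDLM at module load time. */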
3317 int ldlm_init(void)
3318 {
3319         ldlm_resource_slab = kmem_cache_create("ldlm_resources",
3320                                                sizeof(struct ldlm_resource), 0,
3321                                                SLAB_HWCACHE_ALIGN, NULL);
3322         if (ldlm_resource_slab == NULL)
3323                 return -ENOMEM;
3324
3325         ldlm_lock_slab = kmem_cache_create("ldlm_locks",
3326                               sizeof(struct ldlm_lock), 0,
3327                               SLAB_HWCACHE_ALIGN, NULL);
3328         if (ldlm_lock_slab == NULL)
3329                 goto out_resource;
3330
3331         ldlm_interval_slab = kmem_cache_create("interval_node",
3332                                         sizeof(struct ldlm_interval),
3333                                         0, SLAB_HWCACHE_ALIGN, NULL);
3334         if (ldlm_interval_slab == NULL)
3335                 goto out_lock;
3336
3337         ldlm_interval_tree_slab = kmem_cache_create("interval_tree",
3338                         sizeof(struct ldlm_interval_tree) * LCK_MODE_NUM,
3339                         0, SLAB_HWCACHE_ALIGN, NULL);
3340         if (ldlm_interval_tree_slab == NULL)
3341                 goto out_interval;
3342
3343 #ifdef HAVE_SERVER_SUPPORT
3344         ldlm_inodebits_slab = kmem_cache_create("ldlm_ibits_node",
3345                                                 sizeof(struct ldlm_ibits_node),
3346                                                 0, SLAB_HWCACHE_ALIGN, NULL);
3347         if (ldlm_inodebits_slab == NULL)
3348                 goto out_interval_tree;
3349
3350         ldlm_glimpse_work_kmem = kmem_cache_create("ldlm_glimpse_work_kmem",
3351                                         sizeof(struct ldlm_glimpse_work),
3352                                         0, 0, NULL);
3353         if (ldlm_glimpse_work_kmem == NULL)
3354                 goto out_inodebits;
3355 #endif
3356
3357 #if LUSTRE_TRACKS_LOCK_EXP_REFS
3358         class_export_dump_hook = ldlm_dump_export_locks;
3359 #endif
3360         return 0;
3361 #ifdef HAVE_SERVER_SUPPORT
3362 out_inodebits:
3363         kmem_cache_destroy(ldlm_inodebits_slab);
3364 out_interval_tree:
3365         kmem_cache_destroy(ldlm_interval_tree_slab);
3366 #endif
3367 out_interval:
3368         kmem_cache_destroy(ldlm_interval_slab);
3369 out_lock:
3370         kmem_cache_destroy(ldlm_lock_slab);
3371 out_resource:
3372         kmem_cache_destroy(ldlm_resource_slab);
3373
3374         return -ENOMEM;
3375 }
3376
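/*
 * Destroy the slab caches created by ldlm_init() at module unload time.  A
 * non-zero ldlm_refcount indicates unbalanced ldlm_get_ref()/ldlm_put_ref()
 * calls and is only reported, not fixed.
 */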
3377 void ldlm_exit(void)
3378 {
3379         if (ldlm_refcount)
3380                 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
3381         kmem_cache_destroy(ldlm_resource_slab);
3382         /*
3383          * ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
3384          * call rcu_barrier() to wait for all outstanding RCU callbacks to
3385          * complete so that ldlm_lock_free() gets a chance to be called.
3386          */
3387         rcu_barrier();
3388         kmem_cache_destroy(ldlm_lock_slab);
3389         kmem_cache_destroy(ldlm_interval_slab);
3390         kmem_cache_destroy(ldlm_interval_tree_slab);
3391 #ifdef HAVE_SERVER_SUPPORT
3392         kmem_cache_destroy(ldlm_inodebits_slab);
3393         kmem_cache_destroy(ldlm_glimpse_work_kmem);
3394 #endif
3395 }