lustre/ldlm/ldlm_lockd.c (fs/lustre-release.git, commit d3737cb0b6ef2674a0a76e1f891818ad5c77cf89)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_lockd.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <libcfs/libcfs.h>
43 #include <lustre_errno.h>
44 #include <lustre_dlm.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 static int ldlm_num_threads;
49 module_param(ldlm_num_threads, int, 0444);
50 MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
51
52 static unsigned int ldlm_cpu_bind = 1;
53 module_param(ldlm_cpu_bind, uint, 0444);
54 MODULE_PARM_DESC(ldlm_cpu_bind,
55                  "bind DLM service threads to particular CPU partitions");
56
57 static char *ldlm_cpts;
58 module_param(ldlm_cpts, charp, 0444);
59 MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
60
61 static DEFINE_MUTEX(ldlm_ref_mutex);
62 static int ldlm_refcount;
63
64 struct kobject *ldlm_kobj;
65 struct kset *ldlm_ns_kset;
66 struct kset *ldlm_svc_kset;
67
68 /* LDLM state */
69
70 static struct ldlm_state *ldlm_state;
71
72 /*
73  * timeout for initial callback (AST) reply (bz10399)
74  * Because a 32-bit time value has to be sent over the
75  * wire, return it as time_t instead of time64_t.
76  */
77 static inline time_t ldlm_get_rq_timeout(void)
78 {
79         /* Non-AT value */
80         time_t timeout = min(ldlm_timeout, obd_timeout / 3);
81
82         return timeout < 1 ? 1 : timeout;
83 }
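/*
 * Example with illustrative tunable values: if obd_timeout = 100 and
 * ldlm_timeout = 20, the non-AT timeout above is min(20, 100 / 3) = 20
 * seconds; the lower clamp only matters when both tunables are tiny,
 * e.g. obd_timeout = 2 gives min(ldlm_timeout, 0), which is raised to 1.
 */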
84
85 struct ldlm_bl_pool {
86         spinlock_t blp_lock;
87
88         /*
89          * blp_prio_list is used for callbacks that should be handled
90          * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
91          * see b=13843
92          */
93         struct list_head blp_prio_list;
94
95         /*
96          * blp_list is used for all other callbacks which are likely
97          * to take longer to process.
98          */
99         struct list_head blp_list;
100
101         wait_queue_head_t blp_waitq;
102         struct completion blp_comp;
103         atomic_t blp_num_threads;
104         atomic_t blp_busy_threads;
105         int blp_min_threads;
106         int blp_max_threads;
107 };
108
109 struct ldlm_bl_work_item {
110         struct list_head        blwi_entry;
111         struct ldlm_namespace   *blwi_ns;
112         struct ldlm_lock_desc   blwi_ld;
113         struct ldlm_lock        *blwi_lock;
114         struct list_head        blwi_head;
115         int                     blwi_count;
116         struct completion       blwi_comp;
117         enum ldlm_cancel_flags  blwi_flags;
118         int                     blwi_mem_pressure;
119 };
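/*
 * Rough flow (sketch): blocking/cancel work arrives as ldlm_bl_work_item
 * entries queued under blp_lock on blp_list, or on blp_prio_list for
 * LDLM_FL_DISCARD_DATA callbacks; ldlm_bl_* threads sleeping on blp_waitq
 * dequeue and process them, with blp_min_threads/blp_max_threads bounding
 * the pool size and blp_comp/blp_num_threads presumably used when tearing
 * the pool down.
 */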
120
121 #ifdef HAVE_SERVER_SUPPORT
122
123 /**
124  * Protects both waiting_locks_list and expired_lock_thread.
125  */
126 static DEFINE_SPINLOCK(waiting_locks_spinlock); /* BH lock (timer) */
127
128 /**
129  * List for contended locks.
130  *
131  * As soon as a lock is contended, it gets placed on this list and the
132  * expected time to get a response is recorded in the lock. A special
133  * thread walks the list looking for locks that should be released and
134  * schedules client evictions for those that have not been released in
135  * time.
136  *
137  * All access to it should be under waiting_locks_spinlock.
138  */
139 static LIST_HEAD(waiting_locks_list);
140 static void waiting_locks_callback(TIMER_DATA_TYPE unused);
141 static CFS_DEFINE_TIMER(waiting_locks_timer, waiting_locks_callback, 0, 0);
142
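/*
 * Sketch of the expiry pipeline implemented below: __ldlm_add_waiting_lock()
 * puts a lock on waiting_locks_list and arms waiting_locks_timer; when the
 * timer fires, waiting_locks_callback() moves timed-out locks to
 * expired_lock_list and wakes expired_lock_wait_queue; expired_lock_main()
 * then either prolongs still-busy locks or evicts the client via
 * class_fail_export(). elt_state below tracks that thread's lifecycle
 * (ELT_STOPPED -> ELT_READY at startup, ELT_TERMINATE to request exit).
 */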
143 enum elt_state {
144         ELT_STOPPED,
145         ELT_READY,
146         ELT_TERMINATE,
147 };
148
149 static DECLARE_WAIT_QUEUE_HEAD(expired_lock_wait_queue);
150 static enum elt_state expired_lock_thread_state = ELT_STOPPED;
151 static int expired_lock_dump;
152 static LIST_HEAD(expired_lock_list);
153
154 static int ldlm_lock_busy(struct ldlm_lock *lock);
155 static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout);
156 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds);
157
158 static inline int have_expired_locks(void)
159 {
160         int need_to_run;
161
162         ENTRY;
163         spin_lock_bh(&waiting_locks_spinlock);
164         need_to_run = !list_empty(&expired_lock_list);
165         spin_unlock_bh(&waiting_locks_spinlock);
166
167         RETURN(need_to_run);
168 }
169
170 /**
171  * Check expired lock list for expired locks and time them out.
172  */
173 static int expired_lock_main(void *arg)
174 {
175         struct list_head *expired = &expired_lock_list;
176         struct l_wait_info lwi = { 0 };
177         int do_dump;
178
179         ENTRY;
180
181         expired_lock_thread_state = ELT_READY;
182         wake_up(&expired_lock_wait_queue);
183
184         while (1) {
185                 l_wait_event(expired_lock_wait_queue,
186                              have_expired_locks() ||
187                              expired_lock_thread_state == ELT_TERMINATE,
188                              &lwi);
189
190                 spin_lock_bh(&waiting_locks_spinlock);
191                 if (expired_lock_dump) {
192                         spin_unlock_bh(&waiting_locks_spinlock);
193
194                         /* from waiting_locks_callback, but not in timer */
195                         libcfs_debug_dumplog();
196
197                         spin_lock_bh(&waiting_locks_spinlock);
198                         expired_lock_dump = 0;
199                 }
200
201                 do_dump = 0;
202
203                 while (!list_empty(expired)) {
204                         struct obd_export *export;
205                         struct ldlm_lock *lock;
206
207                         lock = list_entry(expired->next, struct ldlm_lock,
208                                           l_pending_chain);
209                         if ((void *)lock < LP_POISON + PAGE_SIZE &&
210                             (void *)lock >= LP_POISON) {
211                                 spin_unlock_bh(&waiting_locks_spinlock);
212                                 CERROR("free lock on elt list %p\n", lock);
213                                 LBUG();
214                         }
215                         list_del_init(&lock->l_pending_chain);
216                         if ((void *)lock->l_export <
217                              LP_POISON + PAGE_SIZE &&
218                             (void *)lock->l_export >= LP_POISON) {
219                                 CERROR("lock with free export on elt list %p\n",
220                                        lock->l_export);
221                                 lock->l_export = NULL;
222                                 LDLM_ERROR(lock, "free export");
223                                 /*
224                                  * release extra ref grabbed by
225                                  * ldlm_add_waiting_lock() or
226                                  * ldlm_failed_ast()
227                                  */
228                                 LDLM_LOCK_RELEASE(lock);
229                                 continue;
230                         }
231
232                         if (ldlm_is_destroyed(lock)) {
233                                 /*
234                                  * release the lock refcount for the lock
235                                  * that waiting_locks_callback() found
236                                  */
237                                 LDLM_LOCK_RELEASE(lock);
238                                 continue;
239                         }
240                         export = class_export_lock_get(lock->l_export, lock);
241                         spin_unlock_bh(&waiting_locks_spinlock);
242
243                         /* Check if we need to prolong timeout */
244                         if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
245                             lock->l_callback_timeout != 0 && /* not AST error */
246                             ldlm_lock_busy(lock)) {
247                                 LDLM_DEBUG(lock, "prolong the busy lock");
248                                 lock_res_and_lock(lock);
249                                 ldlm_add_waiting_lock(lock,
250                                                 ldlm_bl_timeout(lock) >> 1);
251                                 unlock_res_and_lock(lock);
252                         } else {
253                                 spin_lock_bh(&export->exp_bl_list_lock);
254                                 list_del_init(&lock->l_exp_list);
255                                 spin_unlock_bh(&export->exp_bl_list_lock);
256
257                                 LDLM_ERROR(lock,
258                                            "lock callback timer expired after %llds: evicting client at %s ",
259                                            ktime_get_real_seconds() -
260                                            lock->l_blast_sent,
261                                            obd_export_nid2str(export));
262                                 ldlm_lock_to_ns(lock)->ns_timeouts++;
263                                 do_dump++;
264                                 class_fail_export(export);
265                         }
266                         class_export_lock_put(export, lock);
267                         /*
268                          * release extra ref grabbed by ldlm_add_waiting_lock()
269                          * or ldlm_failed_ast()
270                          */
271                         LDLM_LOCK_RELEASE(lock);
272
273                         spin_lock_bh(&waiting_locks_spinlock);
274                 }
275                 spin_unlock_bh(&waiting_locks_spinlock);
276
277                 if (do_dump && obd_dump_on_eviction) {
278                         CERROR("dump the log upon eviction\n");
279                         libcfs_debug_dumplog();
280                 }
281
282                 if (expired_lock_thread_state == ELT_TERMINATE)
283                         break;
284         }
285
286         expired_lock_thread_state = ELT_STOPPED;
287         wake_up(&expired_lock_wait_queue);
288         RETURN(0);
289 }
290
291 /**
292  * Check if there is a request in the export request list
293  * which prevents the lock canceling.
294  */
295 static int ldlm_lock_busy(struct ldlm_lock *lock)
296 {
297         struct ptlrpc_request *req;
298         int match = 0;
299
300         ENTRY;
301
302         if (lock->l_export == NULL)
303                 return 0;
304
305         spin_lock(&lock->l_export->exp_rpc_lock);
306         list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
307                                 rq_exp_list) {
308                 if (req->rq_ops->hpreq_lock_match) {
309                         match = req->rq_ops->hpreq_lock_match(req, lock);
310                         if (match)
311                                 break;
312                 }
313         }
314         spin_unlock(&lock->l_export->exp_rpc_lock);
315         RETURN(match);
316 }
317
318 /* This is called from within a timer interrupt and cannot schedule */
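/*
 * Note: waiting_locks_list is kept in FIFO insertion order (roughly ordered
 * by deadline), so the scan below stops at the first entry that has not yet
 * timed out; a GROUP lock at the head also stops the scan, presumably
 * because group locks may legitimately be held for a long time and are not
 * expired here.
 */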
319 static void waiting_locks_callback(TIMER_DATA_TYPE unused)
320 {
321         struct ldlm_lock *lock;
322         int need_dump = 0;
323
324         spin_lock_bh(&waiting_locks_spinlock);
325         while (!list_empty(&waiting_locks_list)) {
326                 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
327                                   l_pending_chain);
328                 if (lock->l_callback_timeout > ktime_get_seconds() ||
329                     lock->l_req_mode == LCK_GROUP)
330                         break;
331
332                 /*
333                  * no need to take an extra ref on the lock since it was in
334                  * the waiting_locks_list and ldlm_add_waiting_lock()
335                  * already grabbed a ref
336                  */
337                 list_del(&lock->l_pending_chain);
338                 list_add(&lock->l_pending_chain, &expired_lock_list);
339                 need_dump = 1;
340         }
341
342         if (!list_empty(&expired_lock_list)) {
343                 if (obd_dump_on_timeout && need_dump)
344                         expired_lock_dump = __LINE__;
345
346                 wake_up(&expired_lock_wait_queue);
347         }
348
349         /*
350          * Make sure the timer will fire again if we have any locks
351          * left.
352          */
353         if (!list_empty(&waiting_locks_list)) {
354                 unsigned long timeout_jiffies;
355
356                 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
357                                   l_pending_chain);
358                 timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout);
359                 mod_timer(&waiting_locks_timer, timeout_jiffies);
360         }
361         spin_unlock_bh(&waiting_locks_spinlock);
362 }
363
364 /**
365  * Add lock to the list of contended locks.
366  *
367  * Indicate that we're waiting for a client to call us back cancelling a given
368  * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
369  * timer to fire appropriately.  (We round up to the next second, to avoid
370  * floods of timer firings during periods of high lock contention and traffic).
371  * As done by ldlm_add_waiting_lock(), the caller must grab a lock reference
372  * if it has been added to the waiting list (1 is returned).
373  *
374  * Called with the namespace lock held.
375  */
376 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds)
377 {
378         unsigned long timeout_jiffies;
379         time64_t timeout;
380
381         if (!list_empty(&lock->l_pending_chain))
382                 return 0;
383
384         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
385             OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
386                 seconds = 1;
387
388         timeout = ktime_get_seconds() + seconds;
389         if (likely(timeout > lock->l_callback_timeout))
390                 lock->l_callback_timeout = timeout;
391
392         timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout);
393
394         if (time_before(timeout_jiffies, waiting_locks_timer.expires) ||
395             !timer_pending(&waiting_locks_timer))
396                 mod_timer(&waiting_locks_timer, timeout_jiffies);
397
398         /*
399          * if the new lock has a shorter timeout than something earlier on
400          * the list, we'll wait the longer amount of time; no big deal.
401          */
402         /* FIFO */
403         list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
404         return 1;
405 }
406
407 static void ldlm_add_blocked_lock(struct ldlm_lock *lock)
408 {
409         spin_lock_bh(&lock->l_export->exp_bl_list_lock);
410         if (list_empty(&lock->l_exp_list)) {
411                 if (!ldlm_is_granted(lock))
412                         list_add_tail(&lock->l_exp_list,
413                                       &lock->l_export->exp_bl_list);
414                 else
415                         list_add(&lock->l_exp_list,
416                                  &lock->l_export->exp_bl_list);
417         }
418         spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
419
420         /*
421          * A blocked lock is added. Adjust the position in
422          * the stale list if the export is in the list.
423          * If export is stale and not in the list - it is being
424          * processed and will be placed on the right position
425          * on obd_stale_export_put().
426          */
427         if (!list_empty(&lock->l_export->exp_stale_list))
428                 obd_stale_export_adjust(lock->l_export);
429 }
430
431 static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
432 {
433         int ret;
434
435         /* NB: must be called with hold of lock_res_and_lock() */
436         LASSERT(ldlm_is_res_locked(lock));
437         LASSERT(!ldlm_is_cancel_on_block(lock));
438
439         /*
440          * Do not put cross-MDT lock in the waiting list, since we
441          * will not evict it due to timeout for now
442          */
443         if (lock->l_export != NULL &&
444             (exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS))
445                 return 0;
446
447         spin_lock_bh(&waiting_locks_spinlock);
448         if (ldlm_is_cancel(lock)) {
449                 spin_unlock_bh(&waiting_locks_spinlock);
450                 return 0;
451         }
452
453         if (ldlm_is_destroyed(lock)) {
454                 static time64_t next;
455
456                 spin_unlock_bh(&waiting_locks_spinlock);
457                 LDLM_ERROR(lock, "not waiting on destroyed lock (b=5653)");
458                 if (ktime_get_seconds() > next) {
459                         next = ktime_get_seconds() + 14400;
460                         libcfs_debug_dumpstack(NULL);
461                 }
462                 return 0;
463         }
464
465         ldlm_set_waited(lock);
466         lock->l_blast_sent = ktime_get_real_seconds();
467         ret = __ldlm_add_waiting_lock(lock, timeout);
468         if (ret) {
469                 /*
470                  * grab ref on the lock if it has been added to the
471                  * waiting list
472                  */
473                 LDLM_LOCK_GET(lock);
474         }
475         spin_unlock_bh(&waiting_locks_spinlock);
476
477         if (ret)
478                 ldlm_add_blocked_lock(lock);
479
480         LDLM_DEBUG(lock, "%sadding to wait list(timeout: %lld, AT: %s)",
481                    ret == 0 ? "not re-" : "", timeout,
482                    AT_OFF ? "off" : "on");
483         return ret;
484 }
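/*
 * Reference contract, as far as the code in this file shows: the
 * LDLM_LOCK_GET() taken when a lock is added to the waiting list is paired
 * with the LDLM_LOCK_RELEASE() in ldlm_del_waiting_lock() or, if the lock
 * expires or its AST fails, in expired_lock_main().
 */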
485
486 /**
487  * Remove a lock from the pending list, likely because it had its cancellation
488  * callback arrive without incident.  This adjusts the lock-timeout timer if
489  * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
490  * As done by ldlm_del_waiting_lock(), the caller must release the lock
491  * reference when the lock is removed from any list (1 is returned).
492  *
493  * Called with namespace lock held.
494  */
495 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
496 {
497         struct list_head *list_next;
498
499         if (list_empty(&lock->l_pending_chain))
500                 return 0;
501
502         list_next = lock->l_pending_chain.next;
503         if (lock->l_pending_chain.prev == &waiting_locks_list) {
504                 /* Removing the head of the list, adjust timer. */
505                 if (list_next == &waiting_locks_list) {
506                         /* No more, just cancel. */
507                         del_timer(&waiting_locks_timer);
508                 } else {
509                         struct ldlm_lock *next;
510
511                         next = list_entry(list_next, struct ldlm_lock,
512                                           l_pending_chain);
513                         mod_timer(&waiting_locks_timer,
514                                   cfs_time_seconds(next->l_callback_timeout));
515                 }
516         }
517         list_del_init(&lock->l_pending_chain);
518
519         return 1;
520 }
521
522 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
523 {
524         int ret;
525
526         if (lock->l_export == NULL) {
527                 /* We don't have a "waiting locks list" on clients. */
528                 CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
529                 return 0;
530         }
531
532         spin_lock_bh(&waiting_locks_spinlock);
533         ret = __ldlm_del_waiting_lock(lock);
534         ldlm_clear_waited(lock);
535         spin_unlock_bh(&waiting_locks_spinlock);
536
537         /* remove the lock out of export blocking list */
538         spin_lock_bh(&lock->l_export->exp_bl_list_lock);
539         list_del_init(&lock->l_exp_list);
540         spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
541
542         if (ret) {
543                 /*
544                  * release lock ref if it has indeed been removed
545                  * from a list
546                  */
547                 LDLM_LOCK_RELEASE(lock);
548         }
549
550         LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
551         return ret;
552 }
553
554 /**
555  * Prolong the contended lock waiting time.
556  *
557  * Called with namespace lock held.
558  */
559 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
560 {
561         if (lock->l_export == NULL) {
562                 /* We don't have a "waiting locks list" on clients. */
563                 LDLM_DEBUG(lock, "client lock: no-op");
564                 return 0;
565         }
566
567         if (exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) {
568                 /* We don't have a "waiting locks list" on OSP. */
569                 LDLM_DEBUG(lock, "MDS-MDS lock: no-op");
570                 return 0;
571         }
572
573         spin_lock_bh(&waiting_locks_spinlock);
574
575         if (list_empty(&lock->l_pending_chain)) {
576                 spin_unlock_bh(&waiting_locks_spinlock);
577                 LDLM_DEBUG(lock, "wasn't waiting");
578                 return 0;
579         }
580
581         /*
582          * we remove/add the lock to the waiting list, so no need to
583          * release/take a lock reference
584          */
585         __ldlm_del_waiting_lock(lock);
586         __ldlm_add_waiting_lock(lock, timeout);
587         spin_unlock_bh(&waiting_locks_spinlock);
588
589         LDLM_DEBUG(lock, "refreshed");
590         return 1;
591 }
592 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
593
594 #else /* HAVE_SERVER_SUPPORT */
595
596 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
597 {
598         RETURN(0);
599 }
600
601 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
602 {
603         RETURN(0);
604 }
605
606 #endif /* !HAVE_SERVER_SUPPORT */
607
608 #ifdef HAVE_SERVER_SUPPORT
609
610 /**
611  * Calculate the per-export Blocking timeout (covering BL AST, data flush,
612  * lock cancel, and their replies). Used for lock callback timeout and AST
613  * re-send period.
614  *
615  * \param[in] lock        lock which is getting the blocking callback
616  *
617  * \retval            timeout in seconds to wait for the client reply
618  */
619 time64_t ldlm_bl_timeout(struct ldlm_lock *lock)
620 {
621         time64_t timeout;
622
623         if (AT_OFF)
624                 return obd_timeout / 2;
625
626         /*
627          * Since these are non-updating timeouts, we should be conservative.
628          * Take more than usual: 150%.
629          * It would be nice to have some kind of "early reply" mechanism for
630          * lock callbacks too...
631          */
632         timeout = at_get(&lock->l_export->exp_bl_lock_at);
633         return max(timeout + (timeout >> 1), (time64_t)ldlm_enqueue_min);
634 }
635 EXPORT_SYMBOL(ldlm_bl_timeout);
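/*
 * Example with an illustrative AT estimate: if at_get() above returns 30
 * seconds, the blocking callback timeout is 30 + 15 = 45 seconds, unless
 * ldlm_enqueue_min is larger; with adaptive timeouts off it is simply
 * obd_timeout / 2.
 */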
636
637 /**
638  * Perform lock cleanup if AST sending failed.
639  */
640 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
641                             const char *ast_type)
642 {
643         LCONSOLE_ERROR_MSG(0x138,
644                            "%s: A client on nid %s was evicted due to a lock %s callback time out: rc %d\n",
645                            lock->l_export->exp_obd->obd_name,
646                            obd_export_nid2str(lock->l_export), ast_type, rc);
647
648         if (obd_dump_on_timeout)
649                 libcfs_debug_dumplog();
650         spin_lock_bh(&waiting_locks_spinlock);
651         if (__ldlm_del_waiting_lock(lock) == 0)
652                 /*
653                  * the lock was not in any list, grab an extra ref before adding
654                  * the lock to the expired list
655                  */
656                 LDLM_LOCK_GET(lock);
657         lock->l_callback_timeout = 0; /* differentiate it from expired locks */
658         list_add(&lock->l_pending_chain, &expired_lock_list);
659         wake_up(&expired_lock_wait_queue);
660         spin_unlock_bh(&waiting_locks_spinlock);
661 }
662
663 /**
664  * Perform lock cleanup if AST reply came with error.
665  */
666 static int ldlm_handle_ast_error(const struct lu_env *env,
667                                  struct ldlm_lock *lock,
668                                  struct ptlrpc_request *req, int rc,
669                                  const char *ast_type)
670 {
671         struct lnet_process_id peer = req->rq_import->imp_connection->c_peer;
672
673         if (!req->rq_replied || (rc && rc != -EINVAL)) {
674                 if (ldlm_is_cancel(lock)) {
675                         LDLM_DEBUG(lock,
676                                    "%s AST (req@%p x%llu) timeout from nid %s, but cancel was received (AST reply lost?)",
677                                    ast_type, req, req->rq_xid,
678                                    libcfs_nid2str(peer.nid));
679                         ldlm_lock_cancel(lock);
680                         rc = -ERESTART;
681                 } else if (rc == -ENODEV || rc == -ESHUTDOWN ||
682                            (rc == -EIO &&
683                             req->rq_import->imp_state == LUSTRE_IMP_CLOSED)) {
684                         /*
685                          * During umount the AST fails because it cannot be
686                          * sent. This shouldn't lead to client eviction.
687                          * -ENODEV is returned by ptl_send_rpc() for a new
688                          *  request on such an import.
689                          * -ESHUTDOWN is returned by ptlrpc_import_delay_req()
690                          *  if imp_invalid or obd_no_recov is set.
691                          * There is also a check for LUSTRE_IMP_CLOSED in
692                          * ptlrpc_import_delay_req(), which returns -EIO.
693                          * In all such cases the error is ignored.
694                          */
695                         LDLM_DEBUG(lock,
696                                    "%s AST can't be sent due to a server %s failure or umount process: rc = %d\n",
697                                    ast_type,
698                                    req->rq_import->imp_obd->obd_name, rc);
699                 } else {
700                         LDLM_ERROR(lock,
701                                    "client (nid %s) %s %s AST (req@%p x%llu status %d rc %d), evict it",
702                                    libcfs_nid2str(peer.nid),
703                                    req->rq_replied ? "returned error from" :
704                                    "failed to reply to",
705                                    ast_type, req, req->rq_xid,
706                                    (req->rq_repmsg != NULL) ?
707                                    lustre_msg_get_status(req->rq_repmsg) : 0,
708                                    rc);
709                         ldlm_failed_ast(lock, rc, ast_type);
710                 }
711                 return rc;
712         }
713
714         if (rc == -EINVAL) {
715                 struct ldlm_resource *res = lock->l_resource;
716
717                 LDLM_DEBUG(lock,
718                            "client (nid %s) returned %d from %s AST (req@%p x%llu) - normal race",
719                            libcfs_nid2str(peer.nid),
720                            req->rq_repmsg ?
721                            lustre_msg_get_status(req->rq_repmsg) : -1,
722                            ast_type, req, req->rq_xid);
723                 if (res) {
724                         /*
725                          * update lvbo to return proper attributes.
726                          * see b=23174
727                          */
728                         ldlm_resource_getref(res);
729                         ldlm_lvbo_update(env, res, lock, NULL, 1);
730                         ldlm_resource_putref(res);
731                 }
732                 ldlm_lock_cancel(lock);
733                 rc = -ERESTART;
734         }
735
736         return rc;
737 }
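/*
 * Summary of the handling above: on a missing or failed reply, if the client
 * already sent a cancel the lock is cancelled locally and -ERESTART is
 * returned; errors expected during umount/import close (-ENODEV, -ESHUTDOWN,
 * -EIO on a closed import) are only logged; any other failure evicts the
 * client via ldlm_failed_ast(). A replied -EINVAL is treated as a normal
 * race: the LVB is refreshed and the lock is cancelled with -ERESTART.
 */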
738
739 static int ldlm_cb_interpret(const struct lu_env *env,
740                              struct ptlrpc_request *req, void *args, int rc)
741 {
742         struct ldlm_cb_async_args *ca = args;
743         struct ldlm_lock *lock = ca->ca_lock;
744         struct ldlm_cb_set_arg *arg  = ca->ca_set_arg;
745
746         ENTRY;
747
748         LASSERT(lock != NULL);
749
750         switch (arg->type) {
751         case LDLM_GL_CALLBACK:
752                 /*
753                  * Update the LVB from disk if the AST failed
754                  * (this is a legal race)
755                  *
756                  * - Glimpse callback of local lock just returns
757                  *   -ELDLM_NO_LOCK_DATA.
758                  * - Glimpse callback of remote lock might return
759                  *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
760                  */
761                 if (unlikely(arg->gl_interpret_reply)) {
762                         rc = arg->gl_interpret_reply(env, req, args, rc);
763                 } else if (rc == -ELDLM_NO_LOCK_DATA) {
764                         LDLM_DEBUG(lock,
765                                    "lost race - client has a lock but no inode");
766                         ldlm_lvbo_update(env, lock->l_resource, lock, NULL, 1);
767                 } else if (rc != 0) {
768                         rc = ldlm_handle_ast_error(env, lock, req,
769                                                    rc, "glimpse");
770                 } else {
771                         rc = ldlm_lvbo_update(env, lock->l_resource,
772                                               lock, req, 1);
773                 }
774                 break;
775         case LDLM_BL_CALLBACK:
776                 if (rc != 0)
777                         rc = ldlm_handle_ast_error(env, lock, req,
778                                                    rc, "blocking");
779                 break;
780         case LDLM_CP_CALLBACK:
781                 if (rc != 0)
782                         rc = ldlm_handle_ast_error(env, lock, req,
783                                                    rc, "completion");
784                 break;
785         default:
786                 LDLM_ERROR(lock, "invalid opcode for lock callback %d",
787                            arg->type);
788                 LBUG();
789         }
790
791         /* release extra reference taken in ldlm_ast_fini() */
792         LDLM_LOCK_RELEASE(lock);
793
794         if (rc == -ERESTART)
795                 atomic_inc(&arg->restart);
796
797         RETURN(0);
798 }
799
800 static void ldlm_update_resend(struct ptlrpc_request *req, void *data)
801 {
802         struct ldlm_cb_async_args *ca = data;
803         struct ldlm_lock *lock = ca->ca_lock;
804
805         ldlm_refresh_waiting_lock(lock, ldlm_bl_timeout(lock));
806 }
807
808 static inline int ldlm_ast_fini(struct ptlrpc_request *req,
809                                 struct ldlm_cb_set_arg *arg,
810                                 struct ldlm_lock *lock,
811                                 int instant_cancel)
812 {
813         int rc = 0;
814
815         ENTRY;
816
817         if (unlikely(instant_cancel)) {
818                 rc = ptl_send_rpc(req, 1);
819                 ptlrpc_req_finished(req);
820                 if (rc == 0)
821                         atomic_inc(&arg->restart);
822         } else {
823                 LDLM_LOCK_GET(lock);
824                 ptlrpc_set_add_req(arg->set, req);
825         }
826
827         RETURN(rc);
828 }
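/*
 * Note on the two send paths above: for instant cancel the AST is sent
 * directly with ptl_send_rpc(req, 1) and a successful send bumps
 * arg->restart so the caller knows to reprocess the resource; otherwise the
 * request joins arg->set and the extra lock reference taken here is dropped
 * in ldlm_cb_interpret().
 */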
829
830 /**
831  * Check if there are requests in the export request list which prevent
832  * the lock canceling and make these requests high priority ones.
833  */
834 static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
835 {
836         struct ptlrpc_request *req;
837
838         ENTRY;
839
840         if (lock->l_export == NULL) {
841                 LDLM_DEBUG(lock, "client lock: no-op");
842                 RETURN_EXIT;
843         }
844
845         spin_lock(&lock->l_export->exp_rpc_lock);
846         list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
847                             rq_exp_list) {
848                 /*
849                  * Do not process requests that have not yet been added to the
850                  * incoming queue or have already been removed from it for
851                  * processing. We evaluate ptlrpc_nrs_req_can_move() without
852                  * holding svcpt->scp_req_lock, and then redo the check with
853                  * the lock held once we need to obtain a reliable result.
854                  */
855                 if (ptlrpc_nrs_req_can_move(req) &&
856                     req->rq_ops->hpreq_lock_match &&
857                     req->rq_ops->hpreq_lock_match(req, lock))
858                         ptlrpc_nrs_req_hp_move(req);
859         }
860         spin_unlock(&lock->l_export->exp_rpc_lock);
861         EXIT;
862 }
863
864 /**
865  * ->l_blocking_ast() method for server-side locks. This is invoked when newly
866  * enqueued server lock conflicts with given one.
867  *
868  * Sends blocking AST RPC to the client owning that lock; arms timeout timer
869  * to wait for client response.
870  */
871 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
872                              struct ldlm_lock_desc *desc,
873                              void *data, int flag)
874 {
875         struct ldlm_cb_async_args *ca;
876         struct ldlm_cb_set_arg *arg = data;
877         struct ldlm_request *body;
878         struct ptlrpc_request  *req;
879         int instant_cancel = 0;
880         int rc = 0;
881
882         ENTRY;
883
884         if (flag == LDLM_CB_CANCELING)
885                 /* Don't need to do anything here. */
886                 RETURN(0);
887
888         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_SRV_BL_AST)) {
889                 LDLM_DEBUG(lock, "dropping BL AST");
890                 RETURN(0);
891         }
892
893         LASSERT(lock);
894         LASSERT(data != NULL);
895         if (lock->l_export->exp_obd->obd_recovering != 0)
896                 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
897
898         ldlm_lock_reorder_req(lock);
899
900         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
901                                         &RQF_LDLM_BL_CALLBACK,
902                                         LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
903         if (req == NULL)
904                 RETURN(-ENOMEM);
905
906         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
907         ca = ptlrpc_req_async_args(req);
908         ca->ca_set_arg = arg;
909         ca->ca_lock = lock;
910
911         req->rq_interpret_reply = ldlm_cb_interpret;
912
913         lock_res_and_lock(lock);
914         if (ldlm_is_destroyed(lock)) {
915                 /* What's the point? */
916                 unlock_res_and_lock(lock);
917                 ptlrpc_req_finished(req);
918                 RETURN(0);
919         }
920
921         if (!ldlm_is_granted(lock)) {
922                 /*
923                  * this blocking AST will be communicated as part of the
924                  * completion AST instead
925                  */
926                 ldlm_add_blocked_lock(lock);
927                 ldlm_set_waited(lock);
928                 unlock_res_and_lock(lock);
929
930                 ptlrpc_req_finished(req);
931                 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
932                 RETURN(0);
933         }
934
935         if (ldlm_is_cancel_on_block(lock))
936                 instant_cancel = 1;
937
938         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
939         body->lock_handle[0] = lock->l_remote_handle;
940         body->lock_desc = *desc;
941         body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
942
943         LDLM_DEBUG(lock, "server preparing blocking AST");
944
945         ptlrpc_request_set_replen(req);
946         ldlm_set_cbpending(lock);
947         if (instant_cancel) {
948                 unlock_res_and_lock(lock);
949                 ldlm_lock_cancel(lock);
950
951                 req->rq_no_resend = 1;
952         } else {
953                 LASSERT(ldlm_is_granted(lock));
954                 ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
955                 unlock_res_and_lock(lock);
956
957                 /* Do not resend after lock callback timeout */
958                 req->rq_delay_limit = ldlm_bl_timeout(lock);
959                 req->rq_resend_cb = ldlm_update_resend;
960         }
961
962         req->rq_send_state = LUSTRE_IMP_FULL;
963         /* ptlrpc_request_alloc_pack already set timeout */
964         if (AT_OFF)
965                 req->rq_timeout = ldlm_get_rq_timeout();
966
967         if (lock->l_export && lock->l_export->exp_nid_stats &&
968             lock->l_export->exp_nid_stats->nid_ldlm_stats)
969                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
970                                      LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
971
972         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
973
974         RETURN(rc);
975 }
976
977 /**
978  * ->l_completion_ast callback for a remote lock in server namespace.
979  *
980  *  Sends AST to the client notifying it of lock granting.  If initial
981  *  lock response was not sent yet, instead of sending another RPC, just
982  *  mark the lock as granted and the client will understand.
983  */
984 int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
985 {
986         struct ldlm_cb_set_arg *arg = data;
987         struct ldlm_request *body;
988         struct ptlrpc_request *req;
989         struct ldlm_cb_async_args *ca;
990         int instant_cancel = 0;
991         int rc = 0;
992         int lvb_len;
993
994         ENTRY;
995
996         LASSERT(lock != NULL);
997         LASSERT(data != NULL);
998
999         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_SRV_CP_AST)) {
1000                 LDLM_DEBUG(lock, "dropping CP AST");
1001                 RETURN(0);
1002         }
1003
1004         req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
1005                                    &RQF_LDLM_CP_CALLBACK);
1006         if (req == NULL)
1007                 RETURN(-ENOMEM);
1008
1009         /* server namespace, doesn't need lock */
1010         lvb_len = ldlm_lvbo_size(lock);
1011         /*
1012          * LU-3124 & LU-2187: do not return the layout in the completion AST
1013          * because it may deadlock (LU-2187), or the client may not have enough
1014          * space for a large layout. The layout will be returned to the client
1015          * with an extra RPC to fetch xattr.lov.
1016          */
1017         if (ldlm_has_layout(lock))
1018                 lvb_len = 0;
1019
1020         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT, lvb_len);
1021         rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
1022         if (rc) {
1023                 ptlrpc_request_free(req);
1024                 RETURN(rc);
1025         }
1026
1027         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
1028         ca = ptlrpc_req_async_args(req);
1029         ca->ca_set_arg = arg;
1030         ca->ca_lock = lock;
1031
1032         req->rq_interpret_reply = ldlm_cb_interpret;
1033         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1034
1035         body->lock_handle[0] = lock->l_remote_handle;
1036         body->lock_flags = ldlm_flags_to_wire(flags);
1037         ldlm_lock2desc(lock, &body->lock_desc);
1038         if (lvb_len > 0) {
1039                 void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
1040                 const struct lu_env *env = NULL;
1041
1042                 if (req->rq_svc_thread)
1043                         env = req->rq_svc_thread->t_env;
1044
1045                 lvb_len = ldlm_lvbo_fill(env, lock, lvb, &lvb_len);
1046                 if (lvb_len < 0) {
1047                         /*
1048                          * We still need to send the RPC to wake up the blocked
1049                          * enqueue thread on the client.
1050                          *
1051                          * For an old client there is no better way to notify
1052                          * it of the failure than a zero-sized LVB; the client
1053                          * will then fail out with -EPROTO.
1054                          */
1055                         req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, 0,
1056                                            RCL_CLIENT);
1057                         instant_cancel = 1;
1058                 } else {
1059                         req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, lvb_len,
1060                                            RCL_CLIENT);
1061                 }
1062         }
1063
1064         LDLM_DEBUG(lock, "server preparing completion AST");
1065
1066         ptlrpc_request_set_replen(req);
1067
1068         req->rq_send_state = LUSTRE_IMP_FULL;
1069         /* ptlrpc_request_pack already set timeout */
1070         if (AT_OFF)
1071                 req->rq_timeout = ldlm_get_rq_timeout();
1072
1073         /* We only send real blocking ASTs after the lock is granted */
1074         lock_res_and_lock(lock);
1075         if (ldlm_is_ast_sent(lock)) {
1076                 body->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
1077                 /* Copy AST flags like LDLM_FL_DISCARD_DATA. */
1078                 body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
1079                                                        LDLM_FL_AST_MASK);
1080
1081                 /*
1082                  * We might get here before ldlm_handle_enqueue() has set the
1083                  * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock on
1084                  * the waiting list, but this is safe: similar code in
1085                  * ldlm_handle_enqueue() will still call ldlm_lock_cancel(),
1086                  * which not only cancels the lock but also removes it from
1087                  * the waiting list.
1088                  */
1089                 if (ldlm_is_cancel_on_block(lock)) {
1090                         unlock_res_and_lock(lock);
1091                         ldlm_lock_cancel(lock);
1092
1093                         instant_cancel = 1;
1094                         req->rq_no_resend = 1;
1095
1096                         lock_res_and_lock(lock);
1097                 } else {
1098                         /* start the lock-timeout clock */
1099                         ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
1100                         /* Do not resend after lock callback timeout */
1101                         req->rq_delay_limit = ldlm_bl_timeout(lock);
1102                         req->rq_resend_cb = ldlm_update_resend;
1103                 }
1104         }
1105         unlock_res_and_lock(lock);
1106
1107         if (lock->l_export && lock->l_export->exp_nid_stats &&
1108             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1109                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1110                                      LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
1111
1112         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
1113
1114         RETURN(lvb_len < 0 ? lvb_len : rc);
1115 }
1116
1117 /**
1118  * Server side ->l_glimpse_ast handler for client locks.
1119  *
1120  * Sends glimpse AST to the client and waits for reply. Then updates
1121  * lvbo with the result.
1122  */
1123 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
1124 {
1125         struct ldlm_cb_set_arg *arg = data;
1126         struct ldlm_request *body;
1127         struct ptlrpc_request *req;
1128         struct ldlm_cb_async_args *ca;
1129         int rc;
1130         struct req_format *req_fmt;
1131
1132         ENTRY;
1133
1134         LASSERT(lock != NULL);
1135
1136         if (arg->gl_desc != NULL)
1137                 /* There is a glimpse descriptor to pack */
1138                 req_fmt = &RQF_LDLM_GL_CALLBACK_DESC;
1139         else
1140                 req_fmt = &RQF_LDLM_GL_CALLBACK;
1141
1142         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
1143                                         req_fmt, LUSTRE_DLM_VERSION,
1144                                         LDLM_GL_CALLBACK);
1145
1146         if (req == NULL)
1147                 RETURN(-ENOMEM);
1148
1149         if (arg->gl_desc != NULL) {
1150                 /* copy the GL descriptor */
1151                 union ldlm_gl_desc      *desc;
1152
1153                 desc = req_capsule_client_get(&req->rq_pill, &RMF_DLM_GL_DESC);
1154                 *desc = *arg->gl_desc;
1155         }
1156
1157         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1158         body->lock_handle[0] = lock->l_remote_handle;
1159         ldlm_lock2desc(lock, &body->lock_desc);
1160
1161         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
1162         ca = ptlrpc_req_async_args(req);
1163         ca->ca_set_arg = arg;
1164         ca->ca_lock = lock;
1165
1166         /* server namespace, doesn't need lock */
1167         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1168                              ldlm_lvbo_size(lock));
1169         ptlrpc_request_set_replen(req);
1170
1171         req->rq_send_state = LUSTRE_IMP_FULL;
1172         /* ptlrpc_request_alloc_pack already set timeout */
1173         if (AT_OFF)
1174                 req->rq_timeout = ldlm_get_rq_timeout();
1175
1176         req->rq_interpret_reply = ldlm_cb_interpret;
1177
1178         if (lock->l_export && lock->l_export->exp_nid_stats) {
1179                 struct nid_stat *nid_stats = lock->l_export->exp_nid_stats;
1180
1181                 lprocfs_counter_incr(nid_stats->nid_ldlm_stats,
1182                                      LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
1183         }
1184
1185         rc = ldlm_ast_fini(req, arg, lock, 0);
1186
1187         RETURN(rc);
1188 }
1189 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
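/*
 * Note: glimpse ASTs always go through the AST set (ldlm_ast_fini() is
 * called with instant_cancel = 0); the reply is consumed in
 * ldlm_cb_interpret(), which refreshes the LVB from the client's answer,
 * or from disk on failure, unless arg->gl_interpret_reply is set and
 * handles the reply itself.
 */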
1190
1191 int ldlm_glimpse_locks(struct ldlm_resource *res,
1192                        struct list_head *gl_work_list)
1193 {
1194         int rc;
1195
1196         ENTRY;
1197
1198         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
1199                                LDLM_WORK_GL_AST);
1200         if (rc == -ERESTART)
1201                 ldlm_reprocess_all(res);
1202
1203         RETURN(rc);
1204 }
1205 EXPORT_SYMBOL(ldlm_glimpse_locks);
1206
1207 /* return LDLM lock associated with a lock callback request */
1208 struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req)
1209 {
1210         struct ldlm_cb_async_args *ca;
1211         struct ldlm_lock *lock;
1212
1213         ENTRY;
1214
1215         ca = ptlrpc_req_async_args(req);
1216         lock = ca->ca_lock;
1217         if (lock == NULL)
1218                 RETURN(ERR_PTR(-EFAULT));
1219
1220         RETURN(lock);
1221 }
1222 EXPORT_SYMBOL(ldlm_request_lock);
1223
1224 /**
1225  * Main server-side entry point into LDLM for enqueue. This is called by ptlrpc
1226  * service threads to carry out client lock enqueueing requests.
1227  */
1228 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
1229                          struct ptlrpc_request *req,
1230                          const struct ldlm_request *dlm_req,
1231                          const struct ldlm_callback_suite *cbs)
1232 {
1233         struct ldlm_reply *dlm_rep;
1234         __u64 flags;
1235         enum ldlm_error err = ELDLM_OK;
1236         struct ldlm_lock *lock = NULL;
1237         void *cookie = NULL;
1238         int rc = 0;
1239         struct ldlm_resource *res = NULL;
1240         const struct lu_env *env = req->rq_svc_thread->t_env;
1241
1242         ENTRY;
1243
1244         LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
1245
1246         ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF, LATF_SKIP);
1247         flags = ldlm_flags_from_wire(dlm_req->lock_flags);
1248
1249         LASSERT(req->rq_export);
1250
1251         /* for intent enqueue the stat will be updated inside intent policy */
1252         if (ptlrpc_req2svc(req)->srv_stats != NULL &&
1253             !(dlm_req->lock_flags & LDLM_FL_HAS_INTENT))
1254                 ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);
1255
1256         if (req->rq_export && req->rq_export->exp_nid_stats &&
1257             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1258                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1259                                      LDLM_ENQUEUE - LDLM_FIRST_OPC);
1260
1261         if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
1262                      dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
1263                 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
1264                           dlm_req->lock_desc.l_resource.lr_type);
1265                 GOTO(out, rc = -EFAULT);
1266         }
1267
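        /*
         * A valid request mode must be a single mode bit strictly between
         * LCK_MINMODE and LCK_MAXMODE; the "mode & (mode - 1)" test below
         * rejects values with more than one bit set.
         */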
1268         if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
1269                      dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
1270                      dlm_req->lock_desc.l_req_mode &
1271                      (dlm_req->lock_desc.l_req_mode-1))) {
1272                 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
1273                           dlm_req->lock_desc.l_req_mode);
1274                 GOTO(out, rc = -EFAULT);
1275         }
1276
1277         if (unlikely((flags & LDLM_FL_REPLAY) ||
1278                      (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))) {
1279                 /* Find an existing lock in the per-export lock hash */
1280                 /*
1281                  * In the function below, .hs_keycmp resolves to
1282                  * ldlm_export_lock_keycmp()
1283                  */
1284                 /* coverity[overrun-buffer-val] */
1285                 lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
1286                                        (void *)&dlm_req->lock_handle[0]);
1287                 if (lock != NULL) {
1288                         DEBUG_REQ(D_DLMTRACE, req,
1289                                   "found existing lock cookie %#llx",
1290                                   lock->l_handle.h_cookie);
1291                         flags |= LDLM_FL_RESENT;
1292                         GOTO(existing_lock, rc = 0);
1293                 }
1294         } else {
1295                 if (ldlm_reclaim_full()) {
1296                         DEBUG_REQ(D_DLMTRACE, req,
1297                                   "Too many granted locks, reject current enqueue request and let the client retry later.\n");
1298                         GOTO(out, rc = -EINPROGRESS);
1299                 }
1300         }
1301
1302         /* The lock's callback data might be set in the policy function */
1303         lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
1304                                 dlm_req->lock_desc.l_resource.lr_type,
1305                                 dlm_req->lock_desc.l_req_mode,
1306                                 cbs, NULL, 0, LVB_T_NONE);
1307         if (IS_ERR(lock)) {
1308                 rc = PTR_ERR(lock);
1309                 lock = NULL;
1310                 GOTO(out, rc);
1311         }
1312
1313         lock->l_remote_handle = dlm_req->lock_handle[0];
1314         LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
1315
1316         /*
1317          * Initialize the resource LVB, but not for a lock being replayed,
1318          * since the client already got the LVB in that case.
1319          * This must occur early since some policy methods assume resource
1320          * lvb is available (lr_lvb_data != NULL).
1321          */
1322         res = lock->l_resource;
1323         if (!(flags & LDLM_FL_REPLAY)) {
1324                 /* non-replayed lock, delayed lvb init may need to be done */
1325                 rc = ldlm_lvbo_init(env, res);
1326                 if (rc < 0) {
1327                         LDLM_DEBUG(lock, "delayed lvb init failed (rc %d)", rc);
1328                         GOTO(out, rc);
1329                 }
1330         }
1331
1332         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
1333         /*
1334          * Don't enqueue a lock onto the export if it has been disconnected
1335          * due to eviction (b=3822) or server umount (b=24324).
1336          * Cancel it now instead.
1337          */
1338         if (req->rq_export->exp_disconnected) {
1339                 LDLM_ERROR(lock, "lock on disconnected export %p",
1340                            req->rq_export);
1341                 GOTO(out, rc = -ENOTCONN);
1342         }
1343
1344         lock->l_export = class_export_lock_get(req->rq_export, lock);
1345         if (lock->l_export->exp_lock_hash)
1346                 cfs_hash_add(lock->l_export->exp_lock_hash,
1347                              &lock->l_remote_handle,
1348                              &lock->l_exp_hash);
1349
1350         /*
1351          * Inherit the enqueue flags before the operation, because we do not
1352          * keep the res lock on return and next operations (BL AST) may proceed
1353          * without them.
1354          */
1355         lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
1356                                               LDLM_FL_INHERIT_MASK);
1357
1358         ldlm_convert_policy_to_local(req->rq_export,
1359                                      dlm_req->lock_desc.l_resource.lr_type,
1360                                      &dlm_req->lock_desc.l_policy_data,
1361                                      &lock->l_policy_data);
1362         if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
1363                 lock->l_req_extent = lock->l_policy_data.l_extent;
1364
1365 existing_lock:
1366         if (flags & LDLM_FL_HAS_INTENT) {
1367                 /*
1368                  * In this case, the reply buffer is allocated deep in
1369                  * local_lock_enqueue by the policy function.
1370                  */
1371                 cookie = req;
1372         } else {
1373                 /*
1374                  * based on the assumption that the LVB size never changes
1375                  * during the resource lifetime; otherwise it would need
1376                  * resource->lr_lock protection
1377                  */
1378                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
1379                                      RCL_SERVER, ldlm_lvbo_size(lock));
1380
1381                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
1382                         GOTO(out, rc = -ENOMEM);
1383
1384                 rc = req_capsule_server_pack(&req->rq_pill);
1385                 if (rc)
1386                         GOTO(out, rc);
1387         }
1388
1389         err = ldlm_lock_enqueue(env, ns, &lock, cookie, &flags);
1390         if (err) {
1391                 if ((int)err < 0)
1392                         rc = (int)err;
1393                 GOTO(out, err);
1394         }
1395
1396         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1397
1398         ldlm_lock2desc(lock, &dlm_rep->lock_desc);
1399         ldlm_lock2handle(lock, &dlm_rep->lock_handle);
1400
1401         if (lock && lock->l_resource->lr_type == LDLM_EXTENT)
1402                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_BL_EVICT, 6);
1403
1404         /*
1405          * We never send a blocking AST until the lock is granted, but
1406          * we can tell the client about it right away.
1407          */
1408         lock_res_and_lock(lock);
1409
1410         /*
1411          * Now take into account flags to be inherited from original lock
1412          * request both in reply to client and in our own lock flags.
1413          */
1414         dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
1415         lock->l_flags |= flags & LDLM_FL_INHERIT_MASK;
1416
1417         /*
1418          * Don't move a pending lock onto the export if it has already been
1419          * disconnected due to eviction (b=5683) or server umount (b=24324).
1420          * Cancel it now instead.
1421          */
1422         if (unlikely(req->rq_export->exp_disconnected ||
1423                      OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
1424                 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
1425                 rc = -ENOTCONN;
1426         } else if (ldlm_is_ast_sent(lock)) {
1427                 /* fill lock desc for possible lock convert */
1428                 if (lock->l_blocking_lock &&
1429                     lock->l_resource->lr_type == LDLM_IBITS) {
1430                         struct ldlm_lock *bl_lock = lock->l_blocking_lock;
1431                         struct ldlm_lock_desc *rep_desc = &dlm_rep->lock_desc;
1432
1433                         LDLM_DEBUG(lock,
1434                                    "save blocking bits %llx in granted lock",
1435                                    bl_lock->l_policy_data.l_inodebits.bits);
1436                         /*
1437                          * If the lock is blocked then save the blocking ibits
1438                          * in the returned lock policy for a possible lock
1439                          * convert on the client.
1440                          */
1441                         rep_desc->l_policy_data.l_inodebits.cancel_bits =
1442                                 bl_lock->l_policy_data.l_inodebits.bits;
1443                 }
1444                 dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
1445                 if (ldlm_is_granted(lock)) {
1446                         /*
1447                          * Only cancel the lock if it was granted, because
1448                          * otherwise it would be destroyed immediately and
1449                          * never granted in the future, causing timeouts on
1450                          * the client.  A lock that is not granted will be
1451                          * cancelled immediately after the completion AST is sent.
1452                          */
1453                         if (ldlm_is_cancel_on_block(lock)) {
1454                                 unlock_res_and_lock(lock);
1455                                 ldlm_lock_cancel(lock);
1456                                 lock_res_and_lock(lock);
1457                         } else {
1458                                 ldlm_add_waiting_lock(lock,
1459                                                       ldlm_bl_timeout(lock));
1460                         }
1461                 }
1462         }
1463         unlock_res_and_lock(lock);
1464
1465         EXIT;
1466 out:
1467         req->rq_status = rc ?: err; /* return either error - b=11190 */
1468         if (!req->rq_packed_final) {
1469                 err = lustre_pack_reply(req, 1, NULL, NULL);
1470                 if (rc == 0)
1471                         rc = err;
1472         }
1473
1474         /*
1475          * The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1476          * ldlm_reprocess_all.  If this moves, revisit that code. -phil
1477          */
1478         if (lock != NULL) {
1479                 LDLM_DEBUG(lock,
1480                            "server-side enqueue handler, sending reply (err=%d, rc=%d)",
1481                            err, rc);
1482
1483                 if (rc == 0 &&
1484                     req_capsule_has_field(&req->rq_pill, &RMF_DLM_LVB,
1485                                           RCL_SERVER) &&
1486                     ldlm_lvbo_size(lock) > 0) {
1487                         void *buf;
1488                         int buflen;
1489
1490 retry:
1491                         buf = req_capsule_server_get(&req->rq_pill,
1492                                                      &RMF_DLM_LVB);
1493                         LASSERTF(buf != NULL, "req %p, lock %p\n", req, lock);
1494                         buflen = req_capsule_get_size(&req->rq_pill,
1495                                         &RMF_DLM_LVB, RCL_SERVER);
1496                         /*
1497                          * non-replayed lock, delayed lvb init may
1498                          * need to occur now
1499                          */
1500                         if ((buflen > 0) && !(flags & LDLM_FL_REPLAY)) {
1501                                 int rc2;
1502
1503                                 rc2 = ldlm_lvbo_fill(env, lock, buf, &buflen);
1504                                 if (rc2 >= 0) {
1505                                         req_capsule_shrink(&req->rq_pill,
1506                                                            &RMF_DLM_LVB,
1507                                                            rc2, RCL_SERVER);
1508                                 } else if (rc2 == -ERANGE) {
1509                                         rc2 = req_capsule_server_grow(
1510                                                         &req->rq_pill,
1511                                                         &RMF_DLM_LVB, buflen);
1512                                         if (!rc2) {
1513                                                 goto retry;
1514                                         } else {
1515                                                 /*
1516                                                  * if we can't grow the buffer,
1517                                                  * it's ok to return empty lvb
1518                                                  * to client.
1519                                                  */
1520                                                 req_capsule_shrink(
1521                                                         &req->rq_pill,
1522                                                         &RMF_DLM_LVB, 0,
1523                                                         RCL_SERVER);
1524                                         }
1525                                 } else {
1526                                         rc = rc2;
1527                                 }
1528                         } else if (flags & LDLM_FL_REPLAY) {
1529                                 /* no LVB resend upon replay */
1530                                 if (buflen > 0)
1531                                         req_capsule_shrink(&req->rq_pill,
1532                                                            &RMF_DLM_LVB,
1533                                                            0, RCL_SERVER);
1534                                 else
1535                                         rc = buflen;
1536                         } else {
1537                                 rc = buflen;
1538                         }
1539                 }
1540
1541                 if (rc != 0 && !(flags & LDLM_FL_RESENT)) {
1542                         if (lock->l_export) {
1543                                 ldlm_lock_cancel(lock);
1544                         } else {
1545                                 lock_res_and_lock(lock);
1546                                 ldlm_resource_unlink_lock(lock);
1547                                 ldlm_lock_destroy_nolock(lock);
1548                                 unlock_res_and_lock(lock);
1549
1550                         }
1551                 }
1552
1553                 if (!err && !ldlm_is_cbpending(lock) &&
1554                     dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1555                         ldlm_reprocess_all(lock->l_resource);
1556
1557                 LDLM_LOCK_RELEASE(lock);
1558         }
1559
1560         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1561                           lock, rc);
1562
1563         return rc;
1564 }
1565
1566 /*
1567  * Clear the blocking lock. A race is possible between ldlm_handle_convert0()
1568  * and ldlm_work_bl_ast_lock(), so this is done under the lock with a NULL check.
1569  */
1570 void ldlm_clear_blocking_lock(struct ldlm_lock *lock)
1571 {
1572         if (lock->l_blocking_lock) {
1573                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
1574                 lock->l_blocking_lock = NULL;
1575         }
1576 }
1577
1578 /* A lock can be converted to new ibits or a new mode and should then be
1579  * considered a new lock. Clear all state related to previous blocking AST
1580  * processing so that new conflicts will cause new blocking ASTs.
1581  *
1582  * This is used during lock convert below and lock downgrade to COS mode in
1583  * ldlm_lock_mode_downgrade().
1584  */
1585 void ldlm_clear_blocking_data(struct ldlm_lock *lock)
1586 {
1587         ldlm_clear_ast_sent(lock);
1588         lock->l_bl_ast_run = 0;
1589         ldlm_clear_blocking_lock(lock);
1590 }
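
     /*
      * Note: in the call sites in this file both helpers above run under
      * lock_res_and_lock() (see ldlm_handle_convert0() below), which
      * serializes them against concurrent blocking AST processing.
      */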
1591
1592 /**
1593  * Main LDLM entry point for server code to process lock conversion requests.
1594  */
1595 int ldlm_handle_convert0(struct ptlrpc_request *req,
1596                          const struct ldlm_request *dlm_req)
1597 {
1598         struct obd_export *exp = req->rq_export;
1599         struct ldlm_reply *dlm_rep;
1600         struct ldlm_lock *lock;
1601         int rc;
1602
1603         ENTRY;
1604
1605         if (exp && exp->exp_nid_stats && exp->exp_nid_stats->nid_ldlm_stats)
1606                 lprocfs_counter_incr(exp->exp_nid_stats->nid_ldlm_stats,
1607                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1608
1609         rc = req_capsule_server_pack(&req->rq_pill);
1610         if (rc)
1611                 RETURN(rc);
1612
1613         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1614         dlm_rep->lock_flags = dlm_req->lock_flags;
1615
1616         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1617         if (lock) {
1618                 __u64 bits;
1619                 __u64 new;
1620
1621                 bits = lock->l_policy_data.l_inodebits.bits;
1622                 new = dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1623                 LDLM_DEBUG(lock, "server-side convert handler START");
1624
1625                 if (ldlm_is_cancel(lock)) {
1626                         LDLM_ERROR(lock, "convert on canceled lock!");
1627                         rc = ELDLM_NO_LOCK_DATA;
1628                 } else if (dlm_req->lock_desc.l_req_mode !=
1629                            lock->l_granted_mode) {
1630                         LDLM_ERROR(lock, "lock mode differs!");
1631                         rc = ELDLM_NO_LOCK_DATA;
1632                 } else if (bits == new) {
1633                         /*
1634                          * This can be a valid situation if CONVERT RPCs are
1635                          * re-ordered. Just finish silently.
1636                          */
1637                         LDLM_DEBUG(lock, "lock is converted already!");
1638                         rc = ELDLM_OK;
1639                 } else {
1640                         lock_res_and_lock(lock);
1641                         if (ldlm_is_waited(lock))
1642                                 ldlm_del_waiting_lock(lock);
1643
1644                         ldlm_clear_cbpending(lock);
1645                         lock->l_policy_data.l_inodebits.cancel_bits = 0;
1646                         ldlm_inodebits_drop(lock, bits & ~new);
1647
1648                         ldlm_clear_blocking_data(lock);
1649                         unlock_res_and_lock(lock);
1650
1651                         ldlm_reprocess_all(lock->l_resource);
1652                         rc = ELDLM_OK;
1653                 }
1654
1655                 if (rc == ELDLM_OK) {
1656                         dlm_rep->lock_handle = lock->l_remote_handle;
1657                         ldlm_ibits_policy_local_to_wire(&lock->l_policy_data,
1658                                         &dlm_rep->lock_desc.l_policy_data);
1659                 }
1660
1661                 LDLM_DEBUG(lock, "server-side convert handler END, rc = %d",
1662                            rc);
1663                 LDLM_LOCK_PUT(lock);
1664         } else {
1665                 rc = ELDLM_NO_LOCK_DATA;
1666                 LDLM_DEBUG_NOLOCK("server-side convert handler END, rc = %d",
1667                                   rc);
1668         }
1669
1670         req->rq_status = rc;
1671
1672         RETURN(0);
1673 }
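
     /*
      * On success the reply carries the client's remote handle and the lock's
      * remaining inodebits (bits not present in the requested set were dropped
      * via ldlm_inodebits_drop() above); ELDLM_* results are returned through
      * req->rq_status rather than as a transport-level error.
      */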
1674
1675 /**
1676  * Cancel all the locks whose handles are packed into ldlm_request
1677  *
1678  * Called by server code expecting such combined cancel activity
1679  * requests.
1680  */
1681 int ldlm_request_cancel(struct ptlrpc_request *req,
1682                         const struct ldlm_request *dlm_req,
1683                         int first, enum lustre_at_flags flags)
1684 {
1685         const struct lu_env *env = req->rq_svc_thread->t_env;
1686         struct ldlm_resource *res, *pres = NULL;
1687         struct ldlm_lock *lock;
1688         int i, count, done = 0;
1689
1690         ENTRY;
1691
1692         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1693         if (first >= count)
1694                 RETURN(0);
1695
1696         if (count == 1 && dlm_req->lock_handle[0].cookie == 0)
1697                 RETURN(0);
1698
1699         /*
1700          * There is no lock on the server at replay time, so
1701          * skip lock cancelling to make replay tests pass.
1702          */
1703         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1704                 RETURN(0);
1705
1706         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, starting at %d",
1707                           count, first);
1708
1709         for (i = first; i < count; i++) {
1710                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1711                 if (!lock) {
1712                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock (cookie %llu)",
1713                                           dlm_req->lock_handle[i].cookie);
1714                         continue;
1715                 }
1716
1717                 res = lock->l_resource;
1718                 done++;
1719
1720                 /*
1721                  * This code is an optimization to only attempt lock
1722                  * granting on the resource (that could be CPU-expensive)
1723                  * after we are done cancelling locks in that resource.
1724                  */
1725                 if (res != pres) {
1726                         if (pres != NULL) {
1727                                 ldlm_reprocess_all(pres);
1728                                 LDLM_RESOURCE_DELREF(pres);
1729                                 ldlm_resource_putref(pres);
1730                         }
1731                         if (res != NULL) {
1732                                 ldlm_resource_getref(res);
1733                                 LDLM_RESOURCE_ADDREF(res);
1734
1735                                 if (!ldlm_is_discard_data(lock))
1736                                         ldlm_lvbo_update(env, res, lock,
1737                                                          NULL, 1);
1738                         }
1739                         pres = res;
1740                 }
1741
1742                 if ((flags & LATF_STATS) && ldlm_is_ast_sent(lock) &&
1743                     lock->l_blast_sent != 0) {
1744                         time64_t delay = ktime_get_real_seconds() -
1745                                          lock->l_blast_sent;
1746                         LDLM_DEBUG(lock,
1747                                    "server cancels blocked lock after %llds",
1748                                    (s64)delay);
1749                         at_measured(&lock->l_export->exp_bl_lock_at, delay);
1750                 }
1751                 ldlm_lock_cancel(lock);
1752                 LDLM_LOCK_PUT(lock);
1753         }
1754         if (pres != NULL) {
1755                 ldlm_reprocess_all(pres);
1756                 LDLM_RESOURCE_DELREF(pres);
1757                 ldlm_resource_putref(pres);
1758         }
1759         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1760         RETURN(done);
1761 }
1762 EXPORT_SYMBOL(ldlm_request_cancel);
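
     /*
      * Besides ldlm_handle_cancel() below, a typical caller is an enqueue
      * handler that piggy-backs "early lock cancel" handles on the request
      * and skips the handle(s) used by the enqueue itself, for example
      * (sketch only; the offset and flag names are assumed and may differ
      * between versions):
      *
      *      ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF,
      *                          LATF_SKIP);
      */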
1763
1764 /**
1765  * Main LDLM entry point for server code to cancel locks.
1766  *
1767  * Typically gets called from service handler on LDLM_CANCEL opc.
1768  */
1769 int ldlm_handle_cancel(struct ptlrpc_request *req)
1770 {
1771         struct ldlm_request *dlm_req;
1772         int rc;
1773
1774         ENTRY;
1775
1776         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1777         if (dlm_req == NULL) {
1778                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1779                 RETURN(-EFAULT);
1780         }
1781
1782         if (req->rq_export && req->rq_export->exp_nid_stats &&
1783             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1784                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1785                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1786
1787         rc = req_capsule_server_pack(&req->rq_pill);
1788         if (rc)
1789                 RETURN(rc);
1790
1791         if (!ldlm_request_cancel(req, dlm_req, 0, LATF_STATS))
1792                 req->rq_status = LUSTRE_ESTALE;
1793
1794         RETURN(ptlrpc_reply(req));
1795 }
1796 #endif /* HAVE_SERVER_SUPPORT */
1797
1798 /**
1799  * Callback handler for receiving incoming blocking ASTs.
1800  *
1801  * This can only happen on the client side.
1802  */
1803 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1804                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1805 {
1806         int do_ast;
1807
1808         ENTRY;
1809
1810         LDLM_DEBUG(lock, "client blocking AST callback handler");
1811
1812         lock_res_and_lock(lock);
1813
1814         /* set bits to cancel for this lock for possible lock convert */
1815         if (ns_is_client(ns) && (lock->l_resource->lr_type == LDLM_IBITS)) {
1816                 /*
1817                  * The lock description contains the blocking lock's policy,
1818                  * and its cancel_bits field passes the conflicting bits.
1819                  * NOTE: ld can be NULL, or non-NULL but zeroed, if passed
1820                  * from ldlm_bl_thread_blwi(); the checks below on the bits
1821                  * in ld make sure it is a valid description.
1822                  *
1823                  * The server may replace a lock resource while keeping the
1824                  * same cookie; never use cancel bits from a different
1825                  * resource - a full cancel is used instead.
1826                  */
1827                 if (ld && ld->l_policy_data.l_inodebits.bits &&
1828                     ldlm_res_eq(&ld->l_resource.lr_name,
1829                                 &lock->l_resource->lr_name))
1830                         lock->l_policy_data.l_inodebits.cancel_bits =
1831                                 ld->l_policy_data.l_inodebits.cancel_bits;
1832                 /*
1833                  * If there is no valid ld and the lock is already cbpending,
1834                  * then cancel_bits is kept; otherwise it is zeroed.
1835                  */
1836                 else if (!ldlm_is_cbpending(lock))
1837                         lock->l_policy_data.l_inodebits.cancel_bits = 0;
1838         }
1839         ldlm_set_cbpending(lock);
1840
1841         do_ast = (!lock->l_readers && !lock->l_writers);
1842         unlock_res_and_lock(lock);
1843
1844         if (do_ast) {
1845                 CDEBUG(D_DLMTRACE,
1846                        "Lock %p already unused, calling callback (%p)\n",
1847                        lock, lock->l_blocking_ast);
1848                 if (lock->l_blocking_ast != NULL)
1849                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1850                                              LDLM_CB_BLOCKING);
1851         } else {
1852                 CDEBUG(D_DLMTRACE,
1853                        "Lock %p is referenced, will be cancelled later\n",
1854                        lock);
1855         }
1856
1857         LDLM_DEBUG(lock, "client blocking callback handler END");
1858         LDLM_LOCK_RELEASE(lock);
1859         EXIT;
1860 }
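
     /*
      * Typical flow: the blocking AST arrives in ldlm_callback_handler()
      * below, which either queues the lock to a blocking thread via
      * ldlm_bl_to_thread_lock() or, if queueing fails, calls this handler
      * directly; once the lock has no readers or writers, the registered
      * ->l_blocking_ast (e.g. an OSC or MDC blocking callback) cancels it.
      */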
1861
1862 /**
1863  * Callback handler for receiving incoming completion ASTs.
1864  *
1865  * This can only happen on the client side.
1866  */
1867 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1868                                     struct ldlm_namespace *ns,
1869                                     struct ldlm_request *dlm_req,
1870                                     struct ldlm_lock *lock)
1871 {
1872         struct list_head ast_list;
1873         int lvb_len;
1874         int rc = 0;
1875
1876         ENTRY;
1877
1878         LDLM_DEBUG(lock, "client completion callback handler START");
1879
1880         INIT_LIST_HEAD(&ast_list);
1881         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1882                 long to = cfs_time_seconds(1);
1883
1884                 while (to > 0) {
1885                         set_current_state(TASK_INTERRUPTIBLE);
1886                         schedule_timeout(to);
1887                         if (ldlm_is_granted(lock) ||
1888                             ldlm_is_destroyed(lock))
1889                                 break;
1890                 }
1891         }
1892
1893         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1894         if (lvb_len < 0) {
1895                 LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
1896                 GOTO(out, rc = lvb_len);
1897         } else if (lvb_len > 0) {
1898                 if (lock->l_lvb_len > 0) {
1899                         /* for extent lock, lvb contains ost_lvb{}. */
1900                         LASSERT(lock->l_lvb_data != NULL);
1901
1902                         if (unlikely(lock->l_lvb_len < lvb_len)) {
1903                                 LDLM_ERROR(lock,
1904                                            "Replied LVB is larger than expectation, expected = %d, replied = %d",
1905                                            lock->l_lvb_len, lvb_len);
1906                                 GOTO(out, rc = -EINVAL);
1907                         }
1908                 }
1909         }
1910
1911         lock_res_and_lock(lock);
1912
1913         if (!ldlm_res_eq(&dlm_req->lock_desc.l_resource.lr_name,
1914                          &lock->l_resource->lr_name)) {
1915                 ldlm_resource_unlink_lock(lock);
1916                 unlock_res_and_lock(lock);
1917                 rc = ldlm_lock_change_resource(ns, lock,
1918                                 &dlm_req->lock_desc.l_resource.lr_name);
1919                 if (rc < 0) {
1920                         LDLM_ERROR(lock, "Failed to allocate resource");
1921                         GOTO(out, rc);
1922                 }
1923                 LDLM_DEBUG(lock, "completion AST, new resource");
1924                 lock_res_and_lock(lock);
1925         }
1926
1927         if (ldlm_is_destroyed(lock) ||
1928             ldlm_is_granted(lock)) {
1929                 /* b=11300: the lock has already been granted */
1930                 unlock_res_and_lock(lock);
1931                 LDLM_DEBUG(lock, "Double grant race happened");
1932                 GOTO(out, rc = 0);
1933         }
1934
1935         /*
1936          * If we receive the completion AST before the actual enqueue returned,
1937          * then we might need to switch lock modes, resources, or extents.
1938          */
1939         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1940                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1941                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1942         }
1943
1944         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1945                 ldlm_convert_policy_to_local(req->rq_export,
1946                                           dlm_req->lock_desc.l_resource.lr_type,
1947                                           &dlm_req->lock_desc.l_policy_data,
1948                                           &lock->l_policy_data);
1949                 LDLM_DEBUG(lock, "completion AST, new policy data");
1950         }
1951
1952         ldlm_resource_unlink_lock(lock);
1953
1954         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1955                 /*
1956                  * BL_AST locks are not needed in LRU.
1957                  * Let ldlm_cancel_lru() be fast.
1958                  */
1959                 ldlm_lock_remove_from_lru(lock);
1960                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1961                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1962         }
1963
1964         if (lock->l_lvb_len > 0) {
1965                 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
1966                                    lock->l_lvb_data, lvb_len);
1967                 if (rc < 0) {
1968                         unlock_res_and_lock(lock);
1969                         GOTO(out, rc);
1970                 }
1971         }
1972
1973         ldlm_grant_lock(lock, &ast_list);
1974         unlock_res_and_lock(lock);
1975
1976         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1977
1978         /*
1979          * Let the enqueue path call osc_lock_upcall() and initialize
1980          * l_ast_data.
1981          */
1982         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1983
1984         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1985
1986         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1987                           lock);
1988         GOTO(out, rc);
1989
1990 out:
1991         if (rc < 0) {
1992                 lock_res_and_lock(lock);
1993                 ldlm_set_failed(lock);
1994                 unlock_res_and_lock(lock);
1995                 wake_up(&lock->l_waitq);
1996         }
1997         LDLM_LOCK_RELEASE(lock);
1998 }
1999
2000 /**
2001  * Callback handler for receiving incoming glimpse ASTs.
2002  *
2003  * This can only happen on the client side.  After handling the glimpse AST
2004  * we also consider dropping the lock here if it is unused locally for a
2005  * long time.
2006  */
2007 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
2008                                     struct ldlm_namespace *ns,
2009                                     struct ldlm_request *dlm_req,
2010                                     struct ldlm_lock *lock)
2011 {
2012         int rc = -ENOSYS;
2013
2014         ENTRY;
2015
2016         LDLM_DEBUG(lock, "client glimpse AST callback handler");
2017
2018         if (lock->l_glimpse_ast != NULL)
2019                 rc = lock->l_glimpse_ast(lock, req);
2020
2021         if (req->rq_repmsg != NULL) {
2022                 ptlrpc_reply(req);
2023         } else {
2024                 req->rq_status = rc;
2025                 ptlrpc_error(req);
2026         }
2027
2028         lock_res_and_lock(lock);
2029         if (lock->l_granted_mode == LCK_PW &&
2030             !lock->l_readers && !lock->l_writers &&
2031             ktime_after(ktime_get(),
2032                         ktime_add(lock->l_last_used,
2033                                   ktime_set(ns->ns_dirty_age_limit, 0)))) {
2034                 unlock_res_and_lock(lock);
2035                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
2036                         ldlm_handle_bl_callback(ns, NULL, lock);
2037
2038                 EXIT;
2039                 return;
2040         }
2041         unlock_res_and_lock(lock);
2042         LDLM_LOCK_RELEASE(lock);
2043         EXIT;
2044 }
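
     /*
      * Note: after the glimpse reply is sent, a PW lock with no local users
      * that has been idle longer than ns_dirty_age_limit seconds is handed to
      * a blocking thread for cancellation, or cancelled via
      * ldlm_handle_bl_callback() directly if queueing fails.
      */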
2045
2046 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
2047 {
2048         if (req->rq_no_reply)
2049                 return 0;
2050
2051         req->rq_status = rc;
2052         if (!req->rq_packed_final) {
2053                 rc = lustre_pack_reply(req, 1, NULL, NULL);
2054                 if (rc)
2055                         return rc;
2056         }
2057         return ptlrpc_reply(req);
2058 }
2059
2060 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
2061                                enum ldlm_cancel_flags cancel_flags)
2062 {
2063         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
2064
2065         ENTRY;
2066
2067         spin_lock(&blp->blp_lock);
2068         if (blwi->blwi_lock &&
2069             ldlm_is_discard_data(blwi->blwi_lock)) {
2070                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
2071                 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
2072         } else {
2073                 /* other blocking callbacks are added to the regular list */
2074                 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
2075         }
2076         spin_unlock(&blp->blp_lock);
2077
2078         wake_up(&blp->blp_waitq);
2079
2080         /*
2081          * Cannot check blwi->blwi_flags here, as blwi could already have
2082          * been freed in LCF_ASYNC mode.
2083          */
2084         if (!(cancel_flags & LCF_ASYNC))
2085                 wait_for_completion(&blwi->blwi_comp);
2086
2087         RETURN(0);
2088 }
2089
2090 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
2091                              struct ldlm_namespace *ns,
2092                              struct ldlm_lock_desc *ld,
2093                              struct list_head *cancels, int count,
2094                              struct ldlm_lock *lock,
2095                              enum ldlm_cancel_flags cancel_flags)
2096 {
2097         init_completion(&blwi->blwi_comp);
2098         INIT_LIST_HEAD(&blwi->blwi_head);
2099
2100         if (memory_pressure_get())
2101                 blwi->blwi_mem_pressure = 1;
2102
2103         blwi->blwi_ns = ns;
2104         blwi->blwi_flags = cancel_flags;
2105         if (ld != NULL)
2106                 blwi->blwi_ld = *ld;
2107         if (count) {
2108                 list_add(&blwi->blwi_head, cancels);
2109                 list_del_init(cancels);
2110                 blwi->blwi_count = count;
2111         } else {
2112                 blwi->blwi_lock = lock;
2113         }
2114 }
2115
2116 /**
2117  * Queues a list of locks \a cancels containing \a count locks
2118  * for later processing by a blocking thread.  If \a count is zero,
2119  * then the lock referenced as \a lock is queued instead.
2120  *
2121  * The blocking thread then calls the lock's ->l_blocking_ast callback.
2122  * If queueing fails, an error is returned and the caller is expected to
2123  * call ->l_blocking_ast itself.
2124  */
2125 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
2126                              struct ldlm_lock_desc *ld,
2127                              struct ldlm_lock *lock,
2128                              struct list_head *cancels, int count,
2129                              enum ldlm_cancel_flags cancel_flags)
2130 {
2131         ENTRY;
2132
2133         if (cancels && count == 0)
2134                 RETURN(0);
2135
2136         if (cancel_flags & LCF_ASYNC) {
2137                 struct ldlm_bl_work_item *blwi;
2138
2139                 OBD_ALLOC(blwi, sizeof(*blwi));
2140                 if (blwi == NULL)
2141                         RETURN(-ENOMEM);
2142                 init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
2143
2144                 RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
2145         } else {
2146                 /*
2147                  * For a synchronous call do minimal memory allocation, as it
2148                  * could be triggered from the kernel shrinker.
2149                  */
2150                 struct ldlm_bl_work_item blwi;
2151
2152                 memset(&blwi, 0, sizeof(blwi));
2153                 init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
2154                 RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
2155         }
2156 }
2157
2158
2159 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
2160                            struct ldlm_lock *lock)
2161 {
2162         return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
2163 }
2164
2165 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
2166                            struct list_head *cancels, int count,
2167                            enum ldlm_cancel_flags cancel_flags)
2168 {
2169         return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
2170 }
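
     /*
      * For example (sketch only; the LRU-scan helper and flags below are
      * assumed from other parts of Lustre and may differ between versions),
      * an asynchronous LRU cancel could hand the collected locks to the
      * blocking threads like this:
      *
      *      LIST_HEAD(cancels);
      *      int count;
      *
      *      count = ldlm_cancel_lru_local(ns, &cancels, 0, max, 0, lru_flags);
      *      if (count > 0)
      *              ldlm_bl_to_thread_list(ns, NULL, &cancels, count,
      *                                     LCF_ASYNC);
      */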
2171
2172 int ldlm_bl_thread_wakeup(void)
2173 {
2174         wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
2175         return 0;
2176 }
2177
2178 /* Setinfo coming from the server (e.g. MDT) to the client (e.g. MDC)! */
2179 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
2180 {
2181         struct obd_device *obd = req->rq_export->exp_obd;
2182         char *key;
2183         void *val;
2184         int keylen, vallen;
2185         int rc = -ENOSYS;
2186
2187         ENTRY;
2188
2189         DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
2190
2191         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
2192
2193         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2194         if (key == NULL) {
2195                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
2196                 RETURN(-EFAULT);
2197         }
2198         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
2199                                       RCL_CLIENT);
2200         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
2201         if (val == NULL) {
2202                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
2203                 RETURN(-EFAULT);
2204         }
2205         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
2206                                       RCL_CLIENT);
2207
2208         /* We are responsible for swabbing contents of val */
2209
2210         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
2211                 /* Pass it on to mdc (the "export" in this case) */
2212                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
2213                                         req->rq_export,
2214                                         sizeof(KEY_HSM_COPYTOOL_SEND),
2215                                         KEY_HSM_COPYTOOL_SEND,
2216                                         vallen, val, NULL);
2217         else
2218                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
2219
2220         return rc;
2221 }
2222
2223 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
2224                                         const char *msg, int rc,
2225                                         const struct lustre_handle *handle)
2226 {
2227         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
2228                   "%s: [nid %s] [rc %d] [lock %#llx]",
2229                   msg, libcfs_id2str(req->rq_peer), rc,
2230                   handle ? handle->cookie : 0);
2231         if (req->rq_no_reply)
2232                 CWARN("No reply was sent, possibly due to b=21636.\n");
2233         else if (rc)
2234                 CWARN("Sending the reply failed, possibly due to b=21636.\n");
2235 }
2236
2237 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
2238 static int ldlm_callback_handler(struct ptlrpc_request *req)
2239 {
2240         struct ldlm_namespace *ns;
2241         struct ldlm_request *dlm_req;
2242         struct ldlm_lock *lock;
2243         int rc;
2244
2245         ENTRY;
2246
2247         /*
2248          * Requests arrive in sender's byte order.  The ptlrpc service
2249          * handler has already checked and, if necessary, byte-swapped the
2250          * incoming request message body, but I am responsible for the
2251          * message buffers.
2252          */
2253
2254         /* do nothing for sec context finalize */
2255         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
2256                 RETURN(0);
2257
2258         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2259
2260         if (req->rq_export == NULL) {
2261                 rc = ldlm_callback_reply(req, -ENOTCONN);
2262                 ldlm_callback_errmsg(req, "Operate on unconnected server",
2263                                      rc, NULL);
2264                 RETURN(0);
2265         }
2266
2267         LASSERT(req->rq_export != NULL);
2268         LASSERT(req->rq_export->exp_obd != NULL);
2269
2270         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2271         case LDLM_BL_CALLBACK:
2272                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) {
2273                         if (cfs_fail_err)
2274                                 ldlm_callback_reply(req, -(int)cfs_fail_err);
2275                         RETURN(0);
2276                 }
2277                 break;
2278         case LDLM_CP_CALLBACK:
2279                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
2280                         RETURN(0);
2281                 break;
2282         case LDLM_GL_CALLBACK:
2283                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
2284                         RETURN(0);
2285                 break;
2286         case LDLM_SET_INFO:
2287                 rc = ldlm_handle_setinfo(req);
2288                 ldlm_callback_reply(req, rc);
2289                 RETURN(0);
2290         default:
2291                 CERROR("unknown opcode %u\n",
2292                        lustre_msg_get_opc(req->rq_reqmsg));
2293                 ldlm_callback_reply(req, -EPROTO);
2294                 RETURN(0);
2295         }
2296
2297         ns = req->rq_export->exp_obd->obd_namespace;
2298         LASSERT(ns != NULL);
2299
2300         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2301
2302         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2303         if (dlm_req == NULL) {
2304                 rc = ldlm_callback_reply(req, -EPROTO);
2305                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2306                                      NULL);
2307                 RETURN(0);
2308         }
2309
2310         /*
2311          * Force a known safe race, send a cancel to the server for a lock
2312          * which the server has already started a blocking callback on.
2313          */
2314         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2315             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2316                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
2317                 if (rc < 0)
2318                         CERROR("ldlm_cli_cancel: %d\n", rc);
2319         }
2320
2321         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2322         if (!lock) {
2323                 CDEBUG(D_DLMTRACE,
2324                        "callback on lock %#llx - lock disappeared\n",
2325                        dlm_req->lock_handle[0].cookie);
2326                 rc = ldlm_callback_reply(req, -EINVAL);
2327                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2328                                      &dlm_req->lock_handle[0]);
2329                 RETURN(0);
2330         }
2331
2332         if (ldlm_is_fail_loc(lock) &&
2333             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2334                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2335
2336         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2337         lock_res_and_lock(lock);
2338         lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
2339                                               LDLM_FL_AST_MASK);
2340         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2341                 /*
2342                  * If somebody cancels the lock and the cache is already
2343                  * dropped, or the lock failed before the cp_ast was received
2344                  * on the client, we can tell the server we have no lock.
2345                  * Otherwise, we should send the cancel after dropping the cache.
2346                  */
2347                 if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
2348                      ldlm_is_failed(lock)) {
2349                         LDLM_DEBUG(lock,
2350                                    "callback on lock %llx - lock disappeared",
2351                                    dlm_req->lock_handle[0].cookie);
2352                         unlock_res_and_lock(lock);
2353                         LDLM_LOCK_RELEASE(lock);
2354                         rc = ldlm_callback_reply(req, -EINVAL);
2355                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2356                                              &dlm_req->lock_handle[0]);
2357                         RETURN(0);
2358                 }
2359                 /*
2360                  * BL_AST locks are not needed in LRU.
2361                  * Let ldlm_cancel_lru() be fast.
2362                  */
2363                 ldlm_lock_remove_from_lru(lock);
2364                 ldlm_set_bl_ast(lock);
2365         }
2366         unlock_res_and_lock(lock);
2367
2368         /*
2369          * We want the ost thread to get this reply so that it can respond
2370          * to ost requests (write cache writeback) that might be triggered
2371          * in the callback.
2372          *
2373          * But we'd also like to be able to indicate in the reply that we're
2374          * cancelling right now, because it's unused, or have an intent result
2375          * in the reply, so we might have to push the responsibility for sending
2376          * the reply down into the AST handlers, alas.
2377          */
2378
2379         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2380         case LDLM_BL_CALLBACK:
2381                 CDEBUG(D_INODE, "blocking ast\n");
2382                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2383                 if (!ldlm_is_cancel_on_block(lock)) {
2384                         rc = ldlm_callback_reply(req, 0);
2385                         if (req->rq_no_reply || rc)
2386                                 ldlm_callback_errmsg(req, "Normal process", rc,
2387                                                      &dlm_req->lock_handle[0]);
2388                 }
2389                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2390                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2391                 break;
2392         case LDLM_CP_CALLBACK:
2393                 CDEBUG(D_INODE, "completion ast\n");
2394                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2395                 ldlm_callback_reply(req, 0);
2396                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2397                 break;
2398         case LDLM_GL_CALLBACK:
2399                 CDEBUG(D_INODE, "glimpse ast\n");
2400                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2401                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2402                 break;
2403         default:
2404                 LBUG(); /* checked above */
2405         }
2406
2407         RETURN(0);
2408 }
2409
2410 #ifdef HAVE_SERVER_SUPPORT
2411 /**
2412  * Main handler for canceld thread.
2413  *
2414  * Separated into its own thread to avoid deadlocks.
2415  */
2416 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2417 {
2418         int rc;
2419
2420         ENTRY;
2421
2422         /*
2423          * Requests arrive in sender's byte order.  The ptlrpc service
2424          * handler has already checked and, if necessary, byte-swapped the
2425          * incoming request message body, but I am responsible for the
2426          * message buffers.
2427          */
2428
2429         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2430
2431         if (req->rq_export == NULL) {
2432                 struct ldlm_request *dlm_req;
2433
2434                 CERROR("%s from %s arrived at %llu with bad export cookie %llu\n",
2435                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2436                        libcfs_nid2str(req->rq_peer.nid),
2437                        (unsigned long long)req->rq_arrival_time.tv_sec,
2438                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2439
2440                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2441                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2442                         dlm_req = req_capsule_client_get(&req->rq_pill,
2443                                                          &RMF_DLM_REQ);
2444                         if (dlm_req != NULL)
2445                                 ldlm_lock_dump_handle(D_ERROR,
2446                                                       &dlm_req->lock_handle[0]);
2447                 }
2448                 ldlm_callback_reply(req, -ENOTCONN);
2449                 RETURN(0);
2450         }
2451
2452         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2453         /* XXX FIXME move this back to mds/handler.c, b=249 */
2454         case LDLM_CANCEL:
2455                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2456                 CDEBUG(D_INODE, "cancel\n");
2457                 if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_NET) ||
2458                     CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND) ||
2459                     CFS_FAIL_CHECK(OBD_FAIL_LDLM_BL_EVICT))
2460                         RETURN(0);
2461                 rc = ldlm_handle_cancel(req);
2462                 break;
2463         case LDLM_CONVERT:
2464         {
2465                 struct ldlm_request *dlm_req;
2466
2467                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
2468                 CDEBUG(D_INODE, "convert\n");
2469
2470                 dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2471                 if (dlm_req == NULL) {
2472                         CDEBUG(D_INFO, "bad request buffer for convert\n");
2473                         rc = ldlm_callback_reply(req, -EPROTO);
2474                 } else {
2475                         req->rq_status = ldlm_handle_convert0(req, dlm_req);
2476                         rc = ptlrpc_reply(req);
2477                 }
2478                 break;
2479         }
2480         default:
2481                 CERROR("invalid opcode %d\n",
2482                        lustre_msg_get_opc(req->rq_reqmsg));
2483                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2484                 rc = ldlm_callback_reply(req, -EINVAL);
2485         }
2486
2487         RETURN(rc);
2488 }
2489
2490 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2491                                         struct ldlm_lock *lock)
2492 {
2493         struct ldlm_request *dlm_req;
2494         struct lustre_handle lockh;
2495         int rc = 0;
2496         int i;
2497
2498         ENTRY;
2499
2500         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2501         if (dlm_req == NULL)
2502                 RETURN(0);
2503
2504         ldlm_lock2handle(lock, &lockh);
2505         for (i = 0; i < dlm_req->lock_count; i++) {
2506                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2507                                         &lockh)) {
2508                         DEBUG_REQ(D_RPCTRACE, req,
2509                                   "Prio raised by lock %#llx.", lockh.cookie);
2510                         rc = 1;
2511                         break;
2512                 }
2513         }
2514
2515         RETURN(rc);
2516 }
2517
2518 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2519 {
2520         struct ldlm_request *dlm_req;
2521         int rc = 0;
2522         int i;
2523
2524         ENTRY;
2525
2526         /* no prolong in recovery */
2527         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2528                 RETURN(0);
2529
2530         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2531         if (dlm_req == NULL)
2532                 RETURN(-EFAULT);
2533
2534         for (i = 0; i < dlm_req->lock_count; i++) {
2535                 struct ldlm_lock *lock;
2536
2537                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2538                 if (lock == NULL)
2539                         continue;
2540
2541                 rc = ldlm_is_ast_sent(lock) ? 1 : 0;
2542                 if (rc)
2543                         LDLM_DEBUG(lock, "hpreq cancel/convert lock");
2544                 LDLM_LOCK_PUT(lock);
2545
2546                 if (rc)
2547                         break;
2548         }
2549
2550         RETURN(rc);
2551 }
2552
2553 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2554         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2555         .hpreq_check      = ldlm_cancel_hpreq_check,
2556         .hpreq_fini       = NULL,
2557 };
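
     /*
      * These ops let the request queue treat CANCEL/CONVERT RPCs as high
      * priority: hpreq_check raises requests that reference a lock for which
      * a blocking AST has already been sent, and hpreq_lock_match lets a
      * waiting lock raise the priority of the RPC that will release its
      * conflict, so contended locks are released sooner.
      */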
2558
2559 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2560 {
2561         ENTRY;
2562
2563         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2564
2565         if (req->rq_export == NULL)
2566                 RETURN(0);
2567
2568         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2569                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2570                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2571         } else if (LDLM_CONVERT == lustre_msg_get_opc(req->rq_reqmsg)) {
2572                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
2573                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2574         }
2575         RETURN(0);
2576 }
2577
2578 static int ldlm_revoke_lock_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2579                                struct hlist_node *hnode, void *data)
2580
2581 {
2582         struct list_head *rpc_list = data;
2583         struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
2584
2585         lock_res_and_lock(lock);
2586
2587         if (!ldlm_is_granted(lock)) {
2588                 unlock_res_and_lock(lock);
2589                 return 0;
2590         }
2591
2592         LASSERT(lock->l_resource);
2593         if (lock->l_resource->lr_type != LDLM_IBITS &&
2594             lock->l_resource->lr_type != LDLM_PLAIN) {
2595                 unlock_res_and_lock(lock);
2596                 return 0;
2597         }
2598
2599         if (ldlm_is_ast_sent(lock)) {
2600                 unlock_res_and_lock(lock);
2601                 return 0;
2602         }
2603
2604         LASSERT(lock->l_blocking_ast);
2605         LASSERT(!lock->l_blocking_lock);
2606
2607         ldlm_set_ast_sent(lock);
2608         if (lock->l_export && lock->l_export->exp_lock_hash) {
2609                 /*
2610                  * NB: it is safe to call cfs_hash_del() even if the lock is
2611                  * not in exp_lock_hash.
2612                  */
2613                 /*
2614                  * In the function below, .hs_keycmp resolves to
2615                  * ldlm_export_lock_keycmp()
2616                  */
2617                 /* coverity[overrun-buffer-val] */
2618                 cfs_hash_del(lock->l_export->exp_lock_hash,
2619                              &lock->l_remote_handle, &lock->l_exp_hash);
2620         }
2621
2622         list_add_tail(&lock->l_rk_ast, rpc_list);
2623         LDLM_LOCK_GET(lock);
2624
2625         unlock_res_and_lock(lock);
2626         return 0;
2627 }
2628
2629 void ldlm_revoke_export_locks(struct obd_export *exp)
2630 {
2631         struct list_head rpc_list;
2632
2633         ENTRY;
2634
2635         INIT_LIST_HEAD(&rpc_list);
2636         cfs_hash_for_each_nolock(exp->exp_lock_hash,
2637                                  ldlm_revoke_lock_cb, &rpc_list, 0);
2638         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2639                           LDLM_WORK_REVOKE_AST);
2640
2641         EXIT;
2642 }
2643 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2644 #endif /* HAVE_SERVER_SUPPORT */
2645
2646 static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
2647                             struct ldlm_bl_work_item **p_blwi,
2648                             struct obd_export **p_exp)
2649 {
2650         struct ldlm_bl_work_item *blwi = NULL;
2651         static unsigned int num_bl;
2652         static unsigned int num_stale;
2653         int num_th = atomic_read(&blp->blp_num_threads);
2654
2655         *p_exp = obd_stale_export_get();
2656
2657         spin_lock(&blp->blp_lock);
2658         if (*p_exp != NULL) {
2659                 if (num_th == 1 || ++num_stale < num_th) {
2660                         spin_unlock(&blp->blp_lock);
2661                         return 1;
2662                 }
2663                 num_stale = 0;
2664         }
2665
2666         /* process a request from blp_list at least once every blp_num_threads picks */
2667         if (!list_empty(&blp->blp_list) &&
2668             (list_empty(&blp->blp_prio_list) || num_bl == 0))
2669                 blwi = list_entry(blp->blp_list.next,
2670                                   struct ldlm_bl_work_item, blwi_entry);
2671         else
2672                 if (!list_empty(&blp->blp_prio_list))
2673                         blwi = list_entry(blp->blp_prio_list.next,
2674                                           struct ldlm_bl_work_item,
2675                                           blwi_entry);
2676
2677         if (blwi) {
2678                 if (++num_bl >= num_th)
2679                         num_bl = 0;
2680                 list_del(&blwi->blwi_entry);
2681         }
2682         spin_unlock(&blp->blp_lock);
2683         *p_blwi = blwi;
2684
2685         if (*p_exp != NULL && *p_blwi != NULL) {
2686                 obd_stale_export_put(*p_exp);
2687                 *p_exp = NULL;
2688         }
2689
2690         return (*p_blwi != NULL || *p_exp != NULL) ? 1 : 0;
2691 }
2692
2693 /* This only contains temporary data until the thread starts */
2694 struct ldlm_bl_thread_data {
2695         struct ldlm_bl_pool     *bltd_blp;
2696         struct completion       bltd_comp;
2697         int                     bltd_num;
2698 };
2699
2700 static int ldlm_bl_thread_main(void *arg);
2701
2702 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp, bool check_busy)
2703 {
2704         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2705         struct task_struct *task;
2706
2707         init_completion(&bltd.bltd_comp);
2708
2709         bltd.bltd_num = atomic_inc_return(&blp->blp_num_threads);
2710         if (bltd.bltd_num >= blp->blp_max_threads) {
2711                 atomic_dec(&blp->blp_num_threads);
2712                 return 0;
2713         }
2714
2715         LASSERTF(bltd.bltd_num > 0, "thread num:%d\n", bltd.bltd_num);
2716         if (check_busy &&
2717             atomic_read(&blp->blp_busy_threads) < (bltd.bltd_num - 1)) {
2718                 atomic_dec(&blp->blp_num_threads);
2719                 return 0;
2720         }
2721
2722         task = kthread_run(ldlm_bl_thread_main, &bltd, "ldlm_bl_%02d",
2723                            bltd.bltd_num);
2724         if (IS_ERR(task)) {
2725                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
2726                        bltd.bltd_num, PTR_ERR(task));
2727                 atomic_dec(&blp->blp_num_threads);
2728                 return PTR_ERR(task);
2729         }
2730         wait_for_completion(&bltd.bltd_comp);
2731
2732         return 0;
2733 }
2734
2735 /* Not fatal if racy and we end up with a few too many threads */
2736 static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp,
2737                                       struct ldlm_bl_work_item *blwi)
2738 {
2739         if (atomic_read(&blp->blp_num_threads) >= blp->blp_max_threads)
2740                 return 0;
2741
2742         if (atomic_read(&blp->blp_busy_threads) <
2743             atomic_read(&blp->blp_num_threads))
2744                 return 0;
2745
2746         if (blwi != NULL && (blwi->blwi_ns == NULL ||
2747                              blwi->blwi_mem_pressure))
2748                 return 0;
2749
2750         return 1;
2751 }
2752
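/*
 * Handle one blocking callback work item. A sentinel item with
 * blwi_ns == NULL (queued by ldlm_cleanup()) makes the calling thread
 * exit by returning LDLM_ITER_STOP.
 */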
2753 static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
2754                                struct ldlm_bl_work_item *blwi)
2755 {
2756         ENTRY;
2757
2758         if (blwi->blwi_ns == NULL)
2759                 /* added by ldlm_cleanup() */
2760                 RETURN(LDLM_ITER_STOP);
2761
2762         if (blwi->blwi_mem_pressure)
2763                 memory_pressure_set();
2764
2765         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
2766
2767         if (blwi->blwi_count) {
2768                 int count;
2769                 /*
2770                  * In the special case where we cancel LRU locks
2771                  * asynchronously, the list of locks is passed in here.
2772                  * The locks are already marked LDLM_FL_CANCELING but are
2773                  * NOT cancelled locally yet.
2774                  */
2775                 count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2776                                                    blwi->blwi_count,
2777                                                    LCF_BL_AST);
2778                 ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
2779                                      blwi->blwi_flags);
2780         } else {
2781                 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2782                                         blwi->blwi_lock);
2783         }
2784         if (blwi->blwi_mem_pressure)
2785                 memory_pressure_clr();
2786
2787         if (blwi->blwi_flags & LCF_ASYNC)
2788                 OBD_FREE(blwi, sizeof(*blwi));
2789         else
2790                 complete(&blwi->blwi_comp);
2791
2792         RETURN(0);
2793 }
2794
2795 /**
2796  * Cancel stale locks on export. Cancel blocked locks first.
2797  * If the given export has blocked locks, the next export in the list may
2798  * have them too, so only cancel the non-blocked locks when the current
2799  * export has no blocked locks.
2800  **/
2801 static int ldlm_bl_thread_exports(struct ldlm_bl_pool *blp,
2802                                   struct obd_export *exp)
2803 {
2804         int num;
2805
2806         ENTRY;
2807
2808         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_BL_EVICT, 4);
2809
2810         num = ldlm_export_cancel_blocked_locks(exp);
2811         if (num == 0)
2812                 ldlm_export_cancel_locks(exp);
2813
2814         obd_stale_export_put(exp);
2815
2816         RETURN(0);
2817 }
2818
2819
2820 /**
2821  * Main blocking requests processing thread.
2822  *
2823  * Callers put locks into its queue by calling ldlm_bl_to_thread.
2824  * This thread ends up making the actual calls to ->l_blocking_ast
2825  * for the queued locks.
2826  */
2827 static int ldlm_bl_thread_main(void *arg)
2828 {
2829         struct ldlm_bl_pool *blp;
2830         struct ldlm_bl_thread_data *bltd = arg;
2831
2832         ENTRY;
2833
2834         blp = bltd->bltd_blp;
2835
2836         complete(&bltd->bltd_comp);
2837         /* cannot use bltd after this, it is only on caller's stack */
2838
2839         while (1) {
2840                 struct l_wait_info lwi = { 0 };
2841                 struct ldlm_bl_work_item *blwi = NULL;
2842                 struct obd_export *exp = NULL;
2843                 int rc;
2844
2845                 rc = ldlm_bl_get_work(blp, &blwi, &exp);
2846
2847                 if (rc == 0)
2848                         l_wait_event_exclusive(blp->blp_waitq,
2849                                                ldlm_bl_get_work(blp, &blwi,
2850                                                                 &exp),
2851                                                &lwi);
2852                 atomic_inc(&blp->blp_busy_threads);
2853
2854                 if (ldlm_bl_thread_need_create(blp, blwi))
2855                         /* discard the return value, we tried */
2856                         ldlm_bl_thread_start(blp, true);
2857
2858                 if (exp)
2859                         rc = ldlm_bl_thread_exports(blp, exp);
2860                 else if (blwi)
2861                         rc = ldlm_bl_thread_blwi(blp, blwi);
2862
2863                 atomic_dec(&blp->blp_busy_threads);
2864
2865                 if (rc == LDLM_ITER_STOP)
2866                         break;
2867
2868                 /*
2869                  * If there are many namespaces, we will not sleep waiting for
2870                  * work, and must do a cond_resched to avoid holding the CPU
2871                  * for too long.
2872                  */
2873                 cond_resched();
2874         }
2875
2876         atomic_dec(&blp->blp_num_threads);
2877         complete(&blp->blp_comp);
2878         RETURN(0);
2879 }
2880
2881
2882 static int ldlm_setup(void);
2883 static int ldlm_cleanup(void);
2884
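/*
 * Take a reference on the LDLM service, setting the service up on first
 * use. Dropped again with ldlm_put_ref().
 */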
2885 int ldlm_get_ref(void)
2886 {
2887         int rc = 0;
2888
2889         ENTRY;
2890         mutex_lock(&ldlm_ref_mutex);
2891         if (++ldlm_refcount == 1) {
2892                 rc = ldlm_setup();
2893                 if (rc)
2894                         ldlm_refcount--;
2895         }
2896         mutex_unlock(&ldlm_ref_mutex);
2897
2898         RETURN(rc);
2899 }
2900
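/*
 * Drop a reference on the LDLM service; dropping the last reference
 * tears the service down via ldlm_cleanup().
 */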
2901 void ldlm_put_ref(void)
2902 {
2903         ENTRY;
2904         mutex_lock(&ldlm_ref_mutex);
2905         if (ldlm_refcount == 1) {
2906                 int rc = ldlm_cleanup();
2907
2908                 if (rc)
2909                         CERROR("ldlm_cleanup failed: %d\n", rc);
2910                 else
2911                         ldlm_refcount--;
2912         } else {
2913                 ldlm_refcount--;
2914         }
2915         mutex_unlock(&ldlm_ref_mutex);
2916
2917         EXIT;
2918 }
2919
2920 /*
2921  * Export handle<->lock hash operations.
2922  */
2923 static unsigned
2924 ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
2925 {
2926         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2927 }
2928
2929 static void *
2930 ldlm_export_lock_key(struct hlist_node *hnode)
2931 {
2932         struct ldlm_lock *lock;
2933
2934         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2935         return &lock->l_remote_handle;
2936 }
2937
2938 static void
2939 ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
2940 {
2941         struct ldlm_lock     *lock;
2942
2943         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2944         lock->l_remote_handle = *(struct lustre_handle *)key;
2945 }
2946
2947 static int
2948 ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
2949 {
2950         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2951 }
2952
2953 static void *
2954 ldlm_export_lock_object(struct hlist_node *hnode)
2955 {
2956         return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2957 }
2958
2959 static void
2960 ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
2961 {
2962         struct ldlm_lock *lock;
2963
2964         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2965         LDLM_LOCK_GET(lock);
2966 }
2967
2968 static void
2969 ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
2970 {
2971         struct ldlm_lock *lock;
2972
2973         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2974         LDLM_LOCK_RELEASE(lock);
2975 }
2976
2977 static struct cfs_hash_ops ldlm_export_lock_ops = {
2978         .hs_hash        = ldlm_export_lock_hash,
2979         .hs_key         = ldlm_export_lock_key,
2980         .hs_keycmp      = ldlm_export_lock_keycmp,
2981         .hs_keycpy      = ldlm_export_lock_keycpy,
2982         .hs_object      = ldlm_export_lock_object,
2983         .hs_get         = ldlm_export_lock_get,
2984         .hs_put         = ldlm_export_lock_put,
2985         .hs_put_locked  = ldlm_export_lock_put,
2986 };
2987
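/*
 * Set up per-export LDLM state: a hash of locks keyed by the remote
 * lock handle, plus the flock state created by ldlm_init_flock_export().
 */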
2988 int ldlm_init_export(struct obd_export *exp)
2989 {
2990         int rc;
2991
2992         ENTRY;
2993
2994         exp->exp_lock_hash =
2995                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
2996                                 HASH_EXP_LOCK_CUR_BITS,
2997                                 HASH_EXP_LOCK_MAX_BITS,
2998                                 HASH_EXP_LOCK_BKT_BITS, 0,
2999                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
3000                                 &ldlm_export_lock_ops,
3001                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
3002                                 CFS_HASH_NBLK_CHANGE);
3003
3004         if (!exp->exp_lock_hash)
3005                 RETURN(-ENOMEM);
3006
3007         rc = ldlm_init_flock_export(exp);
3008         if (rc)
3009                 GOTO(err, rc);
3010
3011         RETURN(0);
3012 err:
3013         ldlm_destroy_export(exp);
3014         RETURN(rc);
3015 }
3016 EXPORT_SYMBOL(ldlm_init_export);
3017
3018 void ldlm_destroy_export(struct obd_export *exp)
3019 {
3020         ENTRY;
3021         cfs_hash_putref(exp->exp_lock_hash);
3022         exp->exp_lock_hash = NULL;
3023
3024         ldlm_destroy_flock_export(exp);
3025         EXIT;
3026 }
3027 EXPORT_SYMBOL(ldlm_destroy_export);
3028
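/*
 * sysfs attribute controlling whether unused locks are cancelled before
 * lock replay (ldlm_cancel_unused_locks_before_replay).
 */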
3029 static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
3030                                                       struct attribute *attr,
3031                                                       char *buf)
3032 {
3033         return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
3034 }
3035
3036 static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
3037                                                        struct attribute *attr,
3038                                                        const char *buffer,
3039                                                        size_t count)
3040 {
3041         int rc;
3042         unsigned long val;
3043
3044         rc = kstrtoul(buffer, 10, &val);
3045         if (rc)
3046                 return rc;
3047
3048         ldlm_cancel_unused_locks_before_replay = val;
3049
3050         return count;
3051 }
3052 LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
3053
3054 static struct attribute *ldlm_attrs[] = {
3055         &lustre_attr_cancel_unused_locks_before_replay.attr,
3056         NULL,
3057 };
3058
3059 static struct attribute_group ldlm_attr_group = {
3060         .attrs = ldlm_attrs,
3061 };
3062
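/*
 * Bring up the LDLM: create the sysfs/debugfs entries, register the
 * ldlm_cbd callback service (plus, with server support, the ldlm_canceld
 * service and the expired lock thread), and start the blocking callback
 * thread pool, the lock pools and lock reclaim.
 */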
3063 static int ldlm_setup(void)
3064 {
3065         static struct ptlrpc_service_conf       conf;
3066         struct ldlm_bl_pool                    *blp = NULL;
3067 #ifdef HAVE_SERVER_SUPPORT
3068         struct task_struct *task;
3069 #endif /* HAVE_SERVER_SUPPORT */
3070         int i;
3071         int rc = 0;
3072
3073         ENTRY;
3074
3075         if (ldlm_state != NULL)
3076                 RETURN(-EALREADY);
3077
3078         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
3079         if (ldlm_state == NULL)
3080                 RETURN(-ENOMEM);
3081
3082         ldlm_kobj = kobject_create_and_add("ldlm", &lustre_kset->kobj);
3083         if (!ldlm_kobj)
3084                 GOTO(out, rc = -ENOMEM);
3085
3086         rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
3087         if (rc)
3088                 GOTO(out, rc);
3089
3090         ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
3091         if (!ldlm_ns_kset)
3092                 GOTO(out, rc = -ENOMEM);
3093
3094         ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
3095         if (!ldlm_svc_kset)
3096                 GOTO(out, rc = -ENOMEM);
3097
3098         rc = ldlm_debugfs_setup();
3099         if (rc != 0)
3100                 GOTO(out, rc);
3101
3102         memset(&conf, 0, sizeof(conf));
3103         conf = (typeof(conf)) {
3104                 .psc_name               = "ldlm_cbd",
3105                 .psc_watchdog_factor    = 2,
3106                 .psc_buf                = {
3107                         .bc_nbufs               = LDLM_CLIENT_NBUFS,
3108                         .bc_buf_size            = LDLM_BUFSIZE,
3109                         .bc_req_max_size        = LDLM_MAXREQSIZE,
3110                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
3111                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
3112                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
3113                 },
3114                 .psc_thr                = {
3115                         .tc_thr_name            = "ldlm_cb",
3116                         .tc_thr_factor          = LDLM_THR_FACTOR,
3117                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
3118                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
3119                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
3120                         .tc_nthrs_user          = ldlm_num_threads,
3121                         .tc_cpu_bind            = ldlm_cpu_bind,
3122                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
3123                 },
3124                 .psc_cpt                = {
3125                         .cc_pattern             = ldlm_cpts,
3126                         .cc_affinity            = true,
3127                 },
3128                 .psc_ops                = {
3129                         .so_req_handler         = ldlm_callback_handler,
3130                 },
3131         };
3132         ldlm_state->ldlm_cb_service = \
3133                         ptlrpc_register_service(&conf, ldlm_svc_kset,
3134                                                 ldlm_svc_debugfs_dir);
3135         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
3136                 CERROR("failed to start service\n");
3137                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
3138                 ldlm_state->ldlm_cb_service = NULL;
3139                 GOTO(out, rc);
3140         }
3141
3142 #ifdef HAVE_SERVER_SUPPORT
3143         memset(&conf, 0, sizeof(conf));
3144         conf = (typeof(conf)) {
3145                 .psc_name               = "ldlm_canceld",
3146                 .psc_watchdog_factor    = 6,
3147                 .psc_buf                = {
3148                         .bc_nbufs               = LDLM_SERVER_NBUFS,
3149                         .bc_buf_size            = LDLM_BUFSIZE,
3150                         .bc_req_max_size        = LDLM_MAXREQSIZE,
3151                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
3152                         .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
3153                         .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
3154
3155                 },
3156                 .psc_thr                = {
3157                         .tc_thr_name            = "ldlm_cn",
3158                         .tc_thr_factor          = LDLM_THR_FACTOR,
3159                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
3160                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
3161                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
3162                         .tc_nthrs_user          = ldlm_num_threads,
3163                         .tc_cpu_bind            = ldlm_cpu_bind,
3164                         .tc_ctx_tags            = LCT_MD_THREAD | \
3165                                                   LCT_DT_THREAD | \
3166                                                   LCT_CL_THREAD,
3167                 },
3168                 .psc_cpt                = {
3169                         .cc_pattern             = ldlm_cpts,
3170                         .cc_affinity            = true,
3171                 },
3172                 .psc_ops                = {
3173                         .so_req_handler         = ldlm_cancel_handler,
3174                         .so_hpreq_handler       = ldlm_hpreq_handler,
3175                 },
3176         };
3177         ldlm_state->ldlm_cancel_service = \
3178                         ptlrpc_register_service(&conf, ldlm_svc_kset,
3179                                                 ldlm_svc_debugfs_dir);
3180         if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
3181                 CERROR("failed to start service\n");
3182                 rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
3183                 ldlm_state->ldlm_cancel_service = NULL;
3184                 GOTO(out, rc);
3185         }
3186 #endif /* HAVE_SERVER_SUPPORT */
3187
3188         OBD_ALLOC(blp, sizeof(*blp));
3189         if (blp == NULL)
3190                 GOTO(out, rc = -ENOMEM);
3191         ldlm_state->ldlm_bl_pool = blp;
3192
3193         spin_lock_init(&blp->blp_lock);
3194         INIT_LIST_HEAD(&blp->blp_list);
3195         INIT_LIST_HEAD(&blp->blp_prio_list);
3196         init_waitqueue_head(&blp->blp_waitq);
3197         atomic_set(&blp->blp_num_threads, 0);
3198         atomic_set(&blp->blp_busy_threads, 0);
3199
3200         if (ldlm_num_threads == 0) {
3201                 blp->blp_min_threads = LDLM_NTHRS_INIT;
3202                 blp->blp_max_threads = LDLM_NTHRS_MAX;
3203         } else {
3204                 blp->blp_min_threads = blp->blp_max_threads = \
3205                         min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
3206                                                          ldlm_num_threads));
3207         }
3208
3209         for (i = 0; i < blp->blp_min_threads; i++) {
3210                 rc = ldlm_bl_thread_start(blp, false);
3211                 if (rc < 0)
3212                         GOTO(out, rc);
3213         }
3214
3215 #ifdef HAVE_SERVER_SUPPORT
3216         task = kthread_run(expired_lock_main, NULL, "ldlm_elt");
3217         if (IS_ERR(task)) {
3218                 rc = PTR_ERR(task);
3219                 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
3220                 GOTO(out, rc);
3221         }
3222
3223         wait_event(expired_lock_wait_queue,
3224                    expired_lock_thread_state == ELT_READY);
3225 #endif /* HAVE_SERVER_SUPPORT */
3226
3227         rc = ldlm_pools_init();
3228         if (rc) {
3229                 CERROR("Failed to initialize LDLM pools: %d\n", rc);
3230                 GOTO(out, rc);
3231         }
3232
3233         rc = ldlm_reclaim_setup();
3234         if (rc) {
3235                 CERROR("Failed to setup reclaim thread: rc = %d\n", rc);
3236                 GOTO(out, rc);
3237         }
3238         RETURN(0);
3239
3240  out:
3241         ldlm_cleanup();
3242         RETURN(rc);
3243 }
3244
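/*
 * Tear down everything set up by ldlm_setup(). Fails with -EBUSY while
 * any namespaces still exist. The blocking callback threads are stopped
 * by queueing one sentinel work item (blwi_ns == NULL) per thread.
 */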
3245 static int ldlm_cleanup(void)
3246 {
3247         ENTRY;
3248
3249         if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
3250             !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
3251                 CERROR("ldlm still has namespaces; clean these up first.\n");
3252                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
3253                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
3254                 RETURN(-EBUSY);
3255         }
3256
3257         ldlm_reclaim_cleanup();
3258         ldlm_pools_fini();
3259
3260         if (ldlm_state->ldlm_bl_pool != NULL) {
3261                 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
3262
3263                 while (atomic_read(&blp->blp_num_threads) > 0) {
3264                         struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
3265
3266                         init_completion(&blp->blp_comp);
3267
3268                         spin_lock(&blp->blp_lock);
3269                         list_add_tail(&blwi.blwi_entry, &blp->blp_list);
3270                         wake_up(&blp->blp_waitq);
3271                         spin_unlock(&blp->blp_lock);
3272
3273                         wait_for_completion(&blp->blp_comp);
3274                 }
3275
3276                 OBD_FREE(blp, sizeof(*blp));
3277         }
3278
3279         if (ldlm_state->ldlm_cb_service != NULL)
3280                 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
3281 #ifdef HAVE_SERVER_SUPPORT
3282         if (ldlm_state->ldlm_cancel_service != NULL)
3283                 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
3284 #endif
3285
3286         if (ldlm_ns_kset)
3287                 kset_unregister(ldlm_ns_kset);
3288         if (ldlm_svc_kset)
3289                 kset_unregister(ldlm_svc_kset);
3290         if (ldlm_kobj) {
3291                 sysfs_remove_group(ldlm_kobj, &ldlm_attr_group);
3292                 kobject_put(ldlm_kobj);
3293         }
3294
3295         ldlm_debugfs_cleanup();
3296
3297 #ifdef HAVE_SERVER_SUPPORT
3298         if (expired_lock_thread_state != ELT_STOPPED) {
3299                 expired_lock_thread_state = ELT_TERMINATE;
3300                 wake_up(&expired_lock_wait_queue);
3301                 wait_event(expired_lock_wait_queue,
3302                            expired_lock_thread_state == ELT_STOPPED);
3303         }
3304 #endif
3305
3306         OBD_FREE(ldlm_state, sizeof(*ldlm_state));
3307         ldlm_state = NULL;
3308
3309         RETURN(0);
3310 }
3311
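/* Create the slab caches used by the LDLM. */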
3312 int ldlm_init(void)
3313 {
3314         ldlm_resource_slab = kmem_cache_create("ldlm_resources",
3315                                                sizeof(struct ldlm_resource), 0,
3316                                                SLAB_HWCACHE_ALIGN, NULL);
3317         if (ldlm_resource_slab == NULL)
3318                 return -ENOMEM;
3319
3320         ldlm_lock_slab = kmem_cache_create("ldlm_locks",
3321                               sizeof(struct ldlm_lock), 0,
3322                               SLAB_HWCACHE_ALIGN, NULL);
3323         if (ldlm_lock_slab == NULL)
3324                 goto out_resource;
3325
3326         ldlm_interval_slab = kmem_cache_create("interval_node",
3327                                         sizeof(struct ldlm_interval),
3328                                         0, SLAB_HWCACHE_ALIGN, NULL);
3329         if (ldlm_interval_slab == NULL)
3330                 goto out_lock;
3331
3332         ldlm_interval_tree_slab = kmem_cache_create("interval_tree",
3333                         sizeof(struct ldlm_interval_tree) * LCK_MODE_NUM,
3334                         0, SLAB_HWCACHE_ALIGN, NULL);
3335         if (ldlm_interval_tree_slab == NULL)
3336                 goto out_interval;
3337
3338 #ifdef HAVE_SERVER_SUPPORT
3339         ldlm_glimpse_work_kmem = kmem_cache_create("ldlm_glimpse_work_kmem",
3340                                         sizeof(struct ldlm_glimpse_work),
3341                                         0, 0, NULL);
3342         if (ldlm_glimpse_work_kmem == NULL)
3343                 goto out_interval_tree;
3344 #endif
3345
3346 #if LUSTRE_TRACKS_LOCK_EXP_REFS
3347         class_export_dump_hook = ldlm_dump_export_locks;
3348 #endif
3349         return 0;
3350 #ifdef HAVE_SERVER_SUPPORT
3351 out_interval_tree:
3352         kmem_cache_destroy(ldlm_interval_tree_slab);
3353 #endif
3354 out_interval:
3355         kmem_cache_destroy(ldlm_interval_slab);
3356 out_lock:
3357         kmem_cache_destroy(ldlm_lock_slab);
3358 out_resource:
3359         kmem_cache_destroy(ldlm_resource_slab);
3360
3361         return -ENOMEM;
3362 }
3363
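/* Destroy the LDLM slab caches; ldlm_refcount should already be zero. */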
3364 void ldlm_exit(void)
3365 {
3366         if (ldlm_refcount)
3367                 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
3368         kmem_cache_destroy(ldlm_resource_slab);
3369         /*
3370          * ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need
3371          * to call synchronize_rcu() to wait for a grace period to elapse
3372          * before destroying ldlm_lock_slab, so ldlm_lock_free() can run.
3373          */
3374         synchronize_rcu();
3375         kmem_cache_destroy(ldlm_lock_slab);
3376         kmem_cache_destroy(ldlm_interval_slab);
3377         kmem_cache_destroy(ldlm_interval_tree_slab);
3378 #ifdef HAVE_SERVER_SUPPORT
3379         kmem_cache_destroy(ldlm_glimpse_work_kmem);
3380 #endif
3381 }