1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_lockd.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <libcfs/libcfs.h>
43 #include <lustre_errno.h>
44 #include <lustre_dlm.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 static int ldlm_num_threads;
49 module_param(ldlm_num_threads, int, 0444);
50 MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
51
52 static unsigned int ldlm_cpu_bind = 1;
53 module_param(ldlm_cpu_bind, uint, 0444);
54 MODULE_PARM_DESC(ldlm_cpu_bind,
55                  "bind DLM service threads to particular CPU partitions");
56
57 static char *ldlm_cpts;
58 module_param(ldlm_cpts, charp, 0444);
59 MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
60
61 static DEFINE_MUTEX(ldlm_ref_mutex);
62 static int ldlm_refcount;
63
64 struct kobject *ldlm_kobj;
65 struct kset *ldlm_ns_kset;
66 struct kset *ldlm_svc_kset;
67
68 /* LDLM state */
69
70 static struct ldlm_state *ldlm_state;
71
72 /*
73  * timeout for initial callback (AST) reply (bz10399)
74  * Because only a 32-bit time value can be sent over the
75  * wire, return it as time_t instead of time64_t.
76  */
77 static inline time_t ldlm_get_rq_timeout(void)
78 {
79         /* Non-AT value */
80         time_t timeout = min(ldlm_timeout, obd_timeout / 3);
81
82         return timeout < 1 ? 1 : timeout;
83 }
84
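/*
 * Pool of blocking-AST ("bl") threads and the two queues of callback
 * work items they service (blp_prio_list and blp_list below).
 */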
85 struct ldlm_bl_pool {
86         spinlock_t blp_lock;
87
88         /*
89          * blp_prio_list is used for callbacks that should be handled
90          * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
91          * see b=13843
92          */
93         struct list_head blp_prio_list;
94
95         /*
96          * blp_list is used for all other callbacks which are likely
97          * to take longer to process.
98          */
99         struct list_head blp_list;
100
101         wait_queue_head_t blp_waitq;
102         struct completion blp_comp;
103         atomic_t blp_num_threads;
104         atomic_t blp_busy_threads;
105         int blp_min_threads;
106         int blp_max_threads;
107 };
108
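/*
 * A single unit of work queued to the blocking-AST threads: either one
 * lock (blwi_lock) or a list of locks (blwi_head/blwi_count), plus the
 * cancel flags and a completion to signal when the work is done.
 */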
109 struct ldlm_bl_work_item {
110         struct list_head        blwi_entry;
111         struct ldlm_namespace   *blwi_ns;
112         struct ldlm_lock_desc   blwi_ld;
113         struct ldlm_lock        *blwi_lock;
114         struct list_head        blwi_head;
115         int                     blwi_count;
116         struct completion       blwi_comp;
117         enum ldlm_cancel_flags  blwi_flags;
118         int                     blwi_mem_pressure;
119 };
120
121 #ifdef HAVE_SERVER_SUPPORT
122
123 /**
124  * Protects both waiting_locks_list and expired_lock_list.
125  */
126 static DEFINE_SPINLOCK(waiting_locks_spinlock); /* BH lock (timer) */
127
128 /**
129  * List for contended locks.
130  *
131  * As soon as a lock is contended, it gets placed on this list and
132  * the expected time to get a response is recorded in the lock. A special
133  * thread walks the list looking for locks that should be released and
134  * schedules client evictions for those that have not been released in
135  * time.
136  *
137  * All access to it should be under waiting_locks_spinlock.
138  */
139 static LIST_HEAD(waiting_locks_list);
140 static void waiting_locks_callback(TIMER_DATA_TYPE unused);
141 static CFS_DEFINE_TIMER(waiting_locks_timer, waiting_locks_callback, 0, 0);
142
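/* Lifecycle states of the expired lock thread (see expired_lock_main()). */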
143 enum elt_state {
144         ELT_STOPPED,
145         ELT_READY,
146         ELT_TERMINATE,
147 };
148
149 static DECLARE_WAIT_QUEUE_HEAD(expired_lock_wait_queue);
150 static enum elt_state expired_lock_thread_state = ELT_STOPPED;
151 static int expired_lock_dump;
152 static LIST_HEAD(expired_lock_list);
153
154 static int ldlm_lock_busy(struct ldlm_lock *lock);
155 static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout);
156 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds);
157
158 static inline int have_expired_locks(void)
159 {
160         int need_to_run;
161
162         ENTRY;
163         spin_lock_bh(&waiting_locks_spinlock);
164         need_to_run = !list_empty(&expired_lock_list);
165         spin_unlock_bh(&waiting_locks_spinlock);
166
167         RETURN(need_to_run);
168 }
169
170 /**
171  * Check expired lock list for expired locks and time them out.
172  */
173 static int expired_lock_main(void *arg)
174 {
175         struct list_head *expired = &expired_lock_list;
176         int do_dump;
177
178         ENTRY;
179
180         expired_lock_thread_state = ELT_READY;
181         wake_up(&expired_lock_wait_queue);
182
183         while (1) {
184                 wait_event_idle(expired_lock_wait_queue,
185                                 have_expired_locks() ||
186                                 expired_lock_thread_state == ELT_TERMINATE);
187
188                 spin_lock_bh(&waiting_locks_spinlock);
189                 if (expired_lock_dump) {
190                         spin_unlock_bh(&waiting_locks_spinlock);
191
192                         /* from waiting_locks_callback(), but not in timer context */
193                         libcfs_debug_dumplog();
194
195                         spin_lock_bh(&waiting_locks_spinlock);
196                         expired_lock_dump = 0;
197                 }
198
199                 do_dump = 0;
200
201                 while (!list_empty(expired)) {
202                         struct obd_export *export;
203                         struct ldlm_lock *lock;
204
205                         lock = list_entry(expired->next, struct ldlm_lock,
206                                           l_pending_chain);
207                         if ((void *)lock < LP_POISON + PAGE_SIZE &&
208                             (void *)lock >= LP_POISON) {
209                                 spin_unlock_bh(&waiting_locks_spinlock);
210                                 CERROR("free lock on elt list %p\n", lock);
211                                 LBUG();
212                         }
213                         list_del_init(&lock->l_pending_chain);
214                         if ((void *)lock->l_export <
215                              LP_POISON + PAGE_SIZE &&
216                             (void *)lock->l_export >= LP_POISON) {
217                                 CERROR("lock with free export on elt list %p\n",
218                                        lock->l_export);
219                                 lock->l_export = NULL;
220                                 LDLM_ERROR(lock, "free export");
221                                 /*
222                                  * release extra ref grabbed by
223                                  * ldlm_add_waiting_lock() or
224                                  * ldlm_failed_ast()
225                                  */
226                                 LDLM_LOCK_RELEASE(lock);
227                                 continue;
228                         }
229
230                         if (ldlm_is_destroyed(lock)) {
231                                 /*
232                                  * release the lock refcount inherited when
233                                  * waiting_locks_callback() moved this lock here
234                                  */
235                                 LDLM_LOCK_RELEASE(lock);
236                                 continue;
237                         }
238                         export = class_export_lock_get(lock->l_export, lock);
239                         spin_unlock_bh(&waiting_locks_spinlock);
240
241                         /* Check if we need to prolong timeout */
242                         if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
243                             lock->l_callback_timeout != 0 && /* not AST error */
244                             ldlm_lock_busy(lock)) {
245                                 LDLM_DEBUG(lock, "prolong the busy lock");
246                                 lock_res_and_lock(lock);
247                                 ldlm_add_waiting_lock(lock,
248                                                 ldlm_bl_timeout(lock) >> 1);
249                                 unlock_res_and_lock(lock);
250                         } else {
251                                 spin_lock_bh(&export->exp_bl_list_lock);
252                                 list_del_init(&lock->l_exp_list);
253                                 spin_unlock_bh(&export->exp_bl_list_lock);
254
255                                 LDLM_ERROR(lock,
256                                            "lock callback timer expired after %llds: evicting client at %s ",
257                                            ktime_get_real_seconds() -
258                                            lock->l_blast_sent,
259                                            obd_export_nid2str(export));
260                                 ldlm_lock_to_ns(lock)->ns_timeouts++;
261                                 do_dump++;
262                                 class_fail_export(export);
263                         }
264                         class_export_lock_put(export, lock);
265                         /*
266                          * release extra ref grabbed by ldlm_add_waiting_lock()
267                          * or ldlm_failed_ast()
268                          */
269                         LDLM_LOCK_RELEASE(lock);
270
271                         spin_lock_bh(&waiting_locks_spinlock);
272                 }
273                 spin_unlock_bh(&waiting_locks_spinlock);
274
275                 if (do_dump && obd_dump_on_eviction) {
276                         CERROR("dump the log upon eviction\n");
277                         libcfs_debug_dumplog();
278                 }
279
280                 if (expired_lock_thread_state == ELT_TERMINATE)
281                         break;
282         }
283
284         expired_lock_thread_state = ELT_STOPPED;
285         wake_up(&expired_lock_wait_queue);
286         RETURN(0);
287 }
288
289 /**
290  * Check if there is a request in the export request list
291  * that prevents the lock from being canceled.
292  */
293 static int ldlm_lock_busy(struct ldlm_lock *lock)
294 {
295         struct ptlrpc_request *req;
296         int match = 0;
297
298         ENTRY;
299
300         if (lock->l_export == NULL)
301                 return 0;
302
303         spin_lock(&lock->l_export->exp_rpc_lock);
304         list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
305                                 rq_exp_list) {
306                 if (req->rq_ops->hpreq_lock_match) {
307                         match = req->rq_ops->hpreq_lock_match(req, lock);
308                         if (match)
309                                 break;
310                 }
311         }
312         spin_unlock(&lock->l_export->exp_rpc_lock);
313         RETURN(match);
314 }
315
316 /* This is called from within a timer interrupt and cannot schedule */
317 static void waiting_locks_callback(TIMER_DATA_TYPE unused)
318 {
319         struct ldlm_lock *lock;
320         int need_dump = 0;
321
322         spin_lock_bh(&waiting_locks_spinlock);
323         while (!list_empty(&waiting_locks_list)) {
324                 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
325                                   l_pending_chain);
326                 if (lock->l_callback_timeout > ktime_get_seconds() ||
327                     lock->l_req_mode == LCK_GROUP)
328                         break;
329
330                 /*
331                  * no need to take an extra ref on the lock since it was in
332                  * the waiting_locks_list and ldlm_add_waiting_lock()
333                  * already grabbed a ref
334                  */
335                 list_del(&lock->l_pending_chain);
336                 list_add(&lock->l_pending_chain, &expired_lock_list);
337                 need_dump = 1;
338         }
339
340         if (!list_empty(&expired_lock_list)) {
341                 if (obd_dump_on_timeout && need_dump)
342                         expired_lock_dump = __LINE__;
343
344                 wake_up(&expired_lock_wait_queue);
345         }
346
347         /*
348          * Make sure the timer will fire again if we have any locks
349          * left.
350          */
351         if (!list_empty(&waiting_locks_list)) {
352                 unsigned long timeout_jiffies;
353
354                 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
355                                   l_pending_chain);
356                 timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout);
357                 mod_timer(&waiting_locks_timer, timeout_jiffies);
358         }
359         spin_unlock_bh(&waiting_locks_spinlock);
360 }
361
362 /**
363  * Add lock to the list of contended locks.
364  *
365  * Indicate that we're waiting for a client to call us back cancelling a given
366  * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
367  * timer to fire appropriately.  (We round up to the next second, to avoid
368  * floods of timer firings during periods of high lock contention and traffic).
369  * As done by ldlm_add_waiting_lock(), the caller must grab a lock reference
370  * if it has been added to the waiting list (1 is returned).
371  *
372  * Called with the namespace lock held.
373  */
374 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds)
375 {
376         unsigned long timeout_jiffies;
377         time64_t timeout;
378
379         if (!list_empty(&lock->l_pending_chain))
380                 return 0;
381
382         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
383             OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
384                 seconds = 1;
385
386         timeout = ktime_get_seconds() + seconds;
387         if (likely(timeout > lock->l_callback_timeout))
388                 lock->l_callback_timeout = timeout;
389
390         timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout);
391
392         if (time_before(timeout_jiffies, waiting_locks_timer.expires) ||
393             !timer_pending(&waiting_locks_timer))
394                 mod_timer(&waiting_locks_timer, timeout_jiffies);
395
396         /*
397          * if the new lock has a shorter timeout than something earlier on
398          * the list, we'll wait the longer amount of time; no big deal.
399          */
400         /* FIFO */
401         list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
402         return 1;
403 }
404
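/*
 * Link the lock into its export's list of blocked locks: locks that are
 * not yet granted go to the tail, already granted ones to the head.
 */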
405 static void ldlm_add_blocked_lock(struct ldlm_lock *lock)
406 {
407         spin_lock_bh(&lock->l_export->exp_bl_list_lock);
408         if (list_empty(&lock->l_exp_list)) {
409                 if (!ldlm_is_granted(lock))
410                         list_add_tail(&lock->l_exp_list,
411                                       &lock->l_export->exp_bl_list);
412                 else
413                         list_add(&lock->l_exp_list,
414                                  &lock->l_export->exp_bl_list);
415         }
416         spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
417
418         /*
419          * A blocked lock is added. Adjust the position in
420          * the stale list if the export is in the list.
421          * If the export is stale and not in the list, it is being
422          * processed and will be placed at the right position
423          * by obd_stale_export_put().
424          */
425         if (!list_empty(&lock->l_export->exp_stale_list))
426                 obd_stale_export_adjust(lock->l_export);
427 }
428
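/*
 * Add the lock to the waiting list with the given timeout, unless it is
 * cross-MDT, already being cancelled, or destroyed; takes an extra lock
 * reference on success and puts the lock on the export's blocked-lock list.
 */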
429 static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
430 {
431         int ret;
432
433         /* NB: must be called with lock_res_and_lock() held */
434         LASSERT(ldlm_is_res_locked(lock));
435         LASSERT(!ldlm_is_cancel_on_block(lock));
436
437         /*
438          * Do not put a cross-MDT lock in the waiting list, since we
439          * will not evict it due to a timeout for now
440          */
441         if (lock->l_export != NULL &&
442             (exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS))
443                 return 0;
444
445         spin_lock_bh(&waiting_locks_spinlock);
446         if (ldlm_is_cancel(lock)) {
447                 spin_unlock_bh(&waiting_locks_spinlock);
448                 return 0;
449         }
450
451         if (ldlm_is_destroyed(lock)) {
452                 static time64_t next;
453
454                 spin_unlock_bh(&waiting_locks_spinlock);
455                 LDLM_ERROR(lock, "not waiting on destroyed lock (b=5653)");
456                 if (ktime_get_seconds() > next) {
457                         next = ktime_get_seconds() + 14400;
458                         libcfs_debug_dumpstack(NULL);
459                 }
460                 return 0;
461         }
462
463         ldlm_set_waited(lock);
464         lock->l_blast_sent = ktime_get_real_seconds();
465         ret = __ldlm_add_waiting_lock(lock, timeout);
466         if (ret) {
467                 /*
468                  * grab ref on the lock if it has been added to the
469                  * waiting list
470                  */
471                 LDLM_LOCK_GET(lock);
472         }
473         spin_unlock_bh(&waiting_locks_spinlock);
474
475         if (ret)
476                 ldlm_add_blocked_lock(lock);
477
478         LDLM_DEBUG(lock, "%sadding to wait list(timeout: %lld, AT: %s)",
479                    ret == 0 ? "not re-" : "", timeout,
480                    AT_OFF ? "off" : "on");
481         return ret;
482 }
483
484 /**
485  * Remove a lock from the pending list, likely because it had its cancellation
486  * callback arrive without incident.  This adjusts the lock-timeout timer if
487  * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
488  * As done by ldlm_del_waiting_lock(), the caller must release the lock
489  * reference when the lock is removed from any list (1 is returned).
490  *
491  * Called with namespace lock held.
492  */
493 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
494 {
495         struct list_head *list_next;
496
497         if (list_empty(&lock->l_pending_chain))
498                 return 0;
499
500         list_next = lock->l_pending_chain.next;
501         if (lock->l_pending_chain.prev == &waiting_locks_list) {
502                 /* Removing the head of the list, adjust timer. */
503                 if (list_next == &waiting_locks_list) {
504                         /* No more, just cancel. */
505                         del_timer(&waiting_locks_timer);
506                 } else {
507                         struct ldlm_lock *next;
508
509                         next = list_entry(list_next, struct ldlm_lock,
510                                           l_pending_chain);
511                         mod_timer(&waiting_locks_timer,
512                                   cfs_time_seconds(next->l_callback_timeout));
513                 }
514         }
515         list_del_init(&lock->l_pending_chain);
516
517         return 1;
518 }
519
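/*
 * Remove the lock from the waiting list and from the export's blocked-lock
 * list, dropping the reference taken by ldlm_add_waiting_lock() if the lock
 * was actually on the waiting list.
 */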
520 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
521 {
522         int ret;
523
524         if (lock->l_export == NULL) {
525                 /* We don't have a "waiting locks list" on clients. */
526                 CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
527                 return 0;
528         }
529
530         spin_lock_bh(&waiting_locks_spinlock);
531         ret = __ldlm_del_waiting_lock(lock);
532         ldlm_clear_waited(lock);
533         spin_unlock_bh(&waiting_locks_spinlock);
534
535         /* remove the lock from the export's blocking list */
536         spin_lock_bh(&lock->l_export->exp_bl_list_lock);
537         list_del_init(&lock->l_exp_list);
538         spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
539
540         if (ret) {
541                 /*
542                  * release lock ref if it has indeed been removed
543                  * from a list
544                  */
545                 LDLM_LOCK_RELEASE(lock);
546         }
547
548         LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
549         return ret;
550 }
551
552 /**
553  * Prolong the contended lock waiting time.
554  *
555  * Called with namespace lock held.
556  */
557 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
558 {
559         if (lock->l_export == NULL) {
560                 /* We don't have a "waiting locks list" on clients. */
561                 LDLM_DEBUG(lock, "client lock: no-op");
562                 return 0;
563         }
564
565         if (exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) {
566                 /* We don't have a "waiting locks list" on OSP. */
567                 LDLM_DEBUG(lock, "MDS-MDS lock: no-op");
568                 return 0;
569         }
570
571         spin_lock_bh(&waiting_locks_spinlock);
572
573         if (list_empty(&lock->l_pending_chain)) {
574                 spin_unlock_bh(&waiting_locks_spinlock);
575                 LDLM_DEBUG(lock, "wasn't waiting");
576                 return 0;
577         }
578
579         /*
580          * we remove/add the lock to the waiting list, so no need to
581          * release/take a lock reference
582          */
583         __ldlm_del_waiting_lock(lock);
584         __ldlm_add_waiting_lock(lock, timeout);
585         spin_unlock_bh(&waiting_locks_spinlock);
586
587         LDLM_DEBUG(lock, "refreshed");
588         return 1;
589 }
590 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
591
592 #else /* HAVE_SERVER_SUPPORT */
593
594 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
595 {
596         RETURN(0);
597 }
598
599 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
600 {
601         RETURN(0);
602 }
603
604 #endif /* !HAVE_SERVER_SUPPORT */
605
606 #ifdef HAVE_SERVER_SUPPORT
607
608 /**
609  * Calculate the per-export Blocking timeout (covering BL AST, data flush,
610  * lock cancel, and their replies). Used for lock callback timeout and AST
611  * re-send period.
612  *
613  * \param[in] lock        lock which is getting the blocking callback
614  *
615  * \retval            timeout in seconds to wait for the client reply
616  */
617 time64_t ldlm_bl_timeout(struct ldlm_lock *lock)
618 {
619         time64_t timeout;
620
621         if (AT_OFF)
622                 return obd_timeout / 2;
623
624         /*
625          * Since these are non-updating timeouts, we should be conservative.
626          * Take more than usual, 150% of the measured value.
627          * It would be nice to have some kind of "early reply" mechanism for
628          * lock callbacks too...
629          */
630         timeout = at_get(&lock->l_export->exp_bl_lock_at);
631         return max(timeout + (timeout >> 1), (time64_t)ldlm_enqueue_min);
632 }
633 EXPORT_SYMBOL(ldlm_bl_timeout);
634
635 /**
636  * Perform lock cleanup if AST sending failed.
637  */
638 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
639                             const char *ast_type)
640 {
641         LCONSOLE_ERROR_MSG(0x138,
642                            "%s: A client on nid %s was evicted due to a lock %s callback time out: rc %d\n",
643                            lock->l_export->exp_obd->obd_name,
644                            obd_export_nid2str(lock->l_export), ast_type, rc);
645
646         if (obd_dump_on_timeout)
647                 libcfs_debug_dumplog();
648         spin_lock_bh(&waiting_locks_spinlock);
649         if (__ldlm_del_waiting_lock(lock) == 0)
650                 /*
651                  * the lock was not in any list, grab an extra ref before adding
652                  * the lock to the expired list
653                  */
654                 LDLM_LOCK_GET(lock);
655         lock->l_callback_timeout = 0; /* differentiate it from expired locks */
656         list_add(&lock->l_pending_chain, &expired_lock_list);
657         wake_up(&expired_lock_wait_queue);
658         spin_unlock_bh(&waiting_locks_spinlock);
659 }
660
661 /**
662  * Perform lock cleanup if AST reply came with error.
663  */
664 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
665                                  struct ptlrpc_request *req, int rc,
666                                  const char *ast_type)
667 {
668         struct lnet_process_id peer = req->rq_import->imp_connection->c_peer;
669
670         if (!req->rq_replied || (rc && rc != -EINVAL)) {
671                 if (ldlm_is_cancel(lock)) {
672                         LDLM_DEBUG(lock,
673                                    "%s AST (req@%p x%llu) timeout from nid %s, but cancel was received (AST reply lost?)",
674                                    ast_type, req, req->rq_xid,
675                                    libcfs_nid2str(peer.nid));
676                         ldlm_lock_cancel(lock);
677                         rc = -ERESTART;
678                 } else if (rc == -ENODEV || rc == -ESHUTDOWN ||
679                            (rc == -EIO &&
680                             req->rq_import->imp_state == LUSTRE_IMP_CLOSED)) {
681                         /*
682                          * During umount the AST fails because it cannot be
683                          * sent. This shouldn't lead to client eviction.
684                          * -ENODEV is returned by ptl_send_rpc() for a
685                          *  new request on such an import.
686                          * -ESHUTDOWN is returned by ptlrpc_import_delay_req()
687                          *  if imp_invalid is set or obd_no_recov.
688                          * There is also a check for LUSTRE_IMP_CLOSED
689                          * in ptlrpc_import_delay_req(), which returns -EIO.
690                          * In all such cases the error is ignored.
691                          */
692                         LDLM_DEBUG(lock,
693                                    "%s AST can't be sent due to a server %s failure or umount process: rc = %d\n",
694                                    ast_type,
695                                    req->rq_import->imp_obd->obd_name, rc);
696                 } else {
697                         LDLM_ERROR(lock,
698                                    "client (nid %s) %s %s AST (req@%p x%llu status %d rc %d), evict it",
699                                    libcfs_nid2str(peer.nid),
700                                    req->rq_replied ? "returned error from" :
701                                    "failed to reply to",
702                                    ast_type, req, req->rq_xid,
703                                    (req->rq_repmsg != NULL) ?
704                                    lustre_msg_get_status(req->rq_repmsg) : 0,
705                                    rc);
706                         ldlm_failed_ast(lock, rc, ast_type);
707                 }
708                 return rc;
709         }
710
711         if (rc == -EINVAL) {
712                 struct ldlm_resource *res = lock->l_resource;
713
714                 LDLM_DEBUG(lock,
715                            "client (nid %s) returned %d from %s AST (req@%p x%llu) - normal race",
716                            libcfs_nid2str(peer.nid),
717                            req->rq_repmsg ?
718                            lustre_msg_get_status(req->rq_repmsg) : -1,
719                            ast_type, req, req->rq_xid);
720                 if (res) {
721                         /*
722                          * update lvbo to return proper attributes.
723                          * see b=23174
724                          */
725                         ldlm_resource_getref(res);
726                         ldlm_lvbo_update(res, lock, NULL, 1);
727                         ldlm_resource_putref(res);
728                 }
729                 ldlm_lock_cancel(lock);
730                 rc = -ERESTART;
731         }
732
733         return rc;
734 }
735
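/*
 * Interpret callback for AST RPCs (glimpse, blocking, completion): handles
 * reply errors and LVB updates, then drops the extra lock reference taken
 * in ldlm_ast_fini().
 */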
736 static int ldlm_cb_interpret(const struct lu_env *env,
737                              struct ptlrpc_request *req, void *args, int rc)
738 {
739         struct ldlm_cb_async_args *ca = args;
740         struct ldlm_lock *lock = ca->ca_lock;
741         struct ldlm_cb_set_arg *arg  = ca->ca_set_arg;
742
743         ENTRY;
744
745         LASSERT(lock != NULL);
746
747         switch (arg->type) {
748         case LDLM_GL_CALLBACK:
749                 /*
750                  * Update the LVB from disk if the AST failed
751                  * (this is a legal race)
752                  *
753                  * - Glimpse callback of local lock just returns
754                  *   -ELDLM_NO_LOCK_DATA.
755                  * - Glimpse callback of remote lock might return
756                  *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
757                  */
758                 if (unlikely(arg->gl_interpret_reply)) {
759                         rc = arg->gl_interpret_reply(NULL, req, args, rc);
760                 } else if (rc == -ELDLM_NO_LOCK_DATA) {
761                         LDLM_DEBUG(lock,
762                                    "lost race - client has a lock but no inode");
763                         ldlm_lvbo_update(lock->l_resource, lock, NULL, 1);
764                 } else if (rc != 0) {
765                         rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
766                 } else {
767                         rc = ldlm_lvbo_update(lock->l_resource,
768                                               lock, req, 1);
769                 }
770                 break;
771         case LDLM_BL_CALLBACK:
772                 if (rc != 0)
773                         rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
774                 break;
775         case LDLM_CP_CALLBACK:
776                 if (rc != 0)
777                         rc = ldlm_handle_ast_error(lock, req, rc, "completion");
778                 break;
779         default:
780                 LDLM_ERROR(lock, "invalid opcode for lock callback %d",
781                            arg->type);
782                 LBUG();
783         }
784
785         /* release extra reference taken in ldlm_ast_fini() */
786         LDLM_LOCK_RELEASE(lock);
787
788         if (rc == -ERESTART)
789                 atomic_inc(&arg->restart);
790
791         RETURN(0);
792 }
793
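/* Resend callback: refresh the lock's waiting-list timeout on AST resend. */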
794 static void ldlm_update_resend(struct ptlrpc_request *req, void *data)
795 {
796         struct ldlm_cb_async_args *ca = data;
797         struct ldlm_lock *lock = ca->ca_lock;
798
799         ldlm_refresh_waiting_lock(lock, ldlm_bl_timeout(lock));
800 }
801
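/*
 * Finish AST sending: for instant-cancel locks send the RPC right away and
 * release the request; otherwise take an extra lock reference (dropped in
 * ldlm_cb_interpret()) and add the request to the shared RPC set.
 */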
802 static inline int ldlm_ast_fini(struct ptlrpc_request *req,
803                                 struct ldlm_cb_set_arg *arg,
804                                 struct ldlm_lock *lock,
805                                 int instant_cancel)
806 {
807         int rc = 0;
808
809         ENTRY;
810
811         if (unlikely(instant_cancel)) {
812                 rc = ptl_send_rpc(req, 1);
813                 ptlrpc_req_finished(req);
814                 if (rc == 0)
815                         atomic_inc(&arg->restart);
816         } else {
817                 LDLM_LOCK_GET(lock);
818                 ptlrpc_set_add_req(arg->set, req);
819         }
820
821         RETURN(rc);
822 }
823
824 /**
825  * Check if there are requests in the export request list that prevent
826  * the lock from being canceled, and make those requests high priority.
827  */
828 static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
829 {
830         struct ptlrpc_request *req;
831
832         ENTRY;
833
834         if (lock->l_export == NULL) {
835                 LDLM_DEBUG(lock, "client lock: no-op");
836                 RETURN_EXIT;
837         }
838
839         spin_lock(&lock->l_export->exp_rpc_lock);
840         list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
841                             rq_exp_list) {
842                 /*
843                  * Do not process requests that were not yet added to the
844                  * incoming queue or were already removed from it for
845                  * processing. We evaluate ptlrpc_nrs_req_can_move() without
846                  * holding svcpt->scp_req_lock, and then redo the check with
847                  * the lock held once we need to obtain a reliable result.
848                  */
849                 if (ptlrpc_nrs_req_can_move(req) &&
850                     req->rq_ops->hpreq_lock_match &&
851                     req->rq_ops->hpreq_lock_match(req, lock))
852                         ptlrpc_nrs_req_hp_move(req);
853         }
854         spin_unlock(&lock->l_export->exp_rpc_lock);
855         EXIT;
856 }
857
858 /**
859  * ->l_blocking_ast() method for server-side locks. This is invoked when a
860  * newly enqueued server lock conflicts with the given one.
861  *
862  * Sends a blocking AST RPC to the client owning that lock; arms the timeout
863  * timer to wait for the client response.
864  */
865 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
866                              struct ldlm_lock_desc *desc,
867                              void *data, int flag)
868 {
869         struct ldlm_cb_async_args *ca;
870         struct ldlm_cb_set_arg *arg = data;
871         struct ldlm_request *body;
872         struct ptlrpc_request  *req;
873         int instant_cancel = 0;
874         int rc = 0;
875
876         ENTRY;
877
878         if (flag == LDLM_CB_CANCELING)
879                 /* Don't need to do anything here. */
880                 RETURN(0);
881
882         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_SRV_BL_AST)) {
883                 LDLM_DEBUG(lock, "dropping BL AST");
884                 RETURN(0);
885         }
886
887         LASSERT(lock);
888         LASSERT(data != NULL);
889         if (lock->l_export->exp_obd->obd_recovering != 0)
890                 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
891
892         ldlm_lock_reorder_req(lock);
893
894         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
895                                         &RQF_LDLM_BL_CALLBACK,
896                                         LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
897         if (req == NULL)
898                 RETURN(-ENOMEM);
899
900         ca = ptlrpc_req_async_args(ca, req);
901         ca->ca_set_arg = arg;
902         ca->ca_lock = lock;
903
904         req->rq_interpret_reply = ldlm_cb_interpret;
905
906         lock_res_and_lock(lock);
907         if (ldlm_is_destroyed(lock)) {
908                 /* What's the point? */
909                 unlock_res_and_lock(lock);
910                 ptlrpc_req_finished(req);
911                 RETURN(0);
912         }
913
914         if (!ldlm_is_granted(lock)) {
915                 /*
916                  * this blocking AST will be communicated as part of the
917                  * completion AST instead
918                  */
919                 ldlm_add_blocked_lock(lock);
920                 ldlm_set_waited(lock);
921                 unlock_res_and_lock(lock);
922
923                 ptlrpc_req_finished(req);
924                 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
925                 RETURN(0);
926         }
927
928         if (ldlm_is_cancel_on_block(lock))
929                 instant_cancel = 1;
930
931         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
932         body->lock_handle[0] = lock->l_remote_handle;
933         body->lock_desc = *desc;
934         body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
935
936         LDLM_DEBUG(lock, "server preparing blocking AST");
937
938         ptlrpc_request_set_replen(req);
939         ldlm_set_cbpending(lock);
940         if (instant_cancel) {
941                 unlock_res_and_lock(lock);
942                 ldlm_lock_cancel(lock);
943
944                 req->rq_no_resend = 1;
945         } else {
946                 LASSERT(ldlm_is_granted(lock));
947                 ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
948                 unlock_res_and_lock(lock);
949
950                 /* Do not resend after lock callback timeout */
951                 req->rq_delay_limit = ldlm_bl_timeout(lock);
952                 req->rq_resend_cb = ldlm_update_resend;
953         }
954
955         req->rq_send_state = LUSTRE_IMP_FULL;
956         /* ptlrpc_request_alloc_pack already set timeout */
957         if (AT_OFF)
958                 req->rq_timeout = ldlm_get_rq_timeout();
959
960         if (lock->l_export && lock->l_export->exp_nid_stats &&
961             lock->l_export->exp_nid_stats->nid_ldlm_stats)
962                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
963                                      LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
964
965         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
966
967         RETURN(rc);
968 }
969
970 /**
971  * ->l_completion_ast callback for a remote lock in server namespace.
972  *
973  *  Sends AST to the client notifying it of lock granting.  If the initial
974  *  lock response has not been sent yet, instead of sending another RPC, just
975  *  mark the lock as granted and the client will understand.
976  */
977 int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
978 {
979         struct ldlm_cb_set_arg *arg = data;
980         struct ldlm_request *body;
981         struct ptlrpc_request *req;
982         struct ldlm_cb_async_args *ca;
983         int instant_cancel = 0;
984         int rc = 0;
985         int lvb_len;
986
987         ENTRY;
988
989         LASSERT(lock != NULL);
990         LASSERT(data != NULL);
991
992         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_SRV_CP_AST)) {
993                 LDLM_DEBUG(lock, "dropping CP AST");
994                 RETURN(0);
995         }
996
997         req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
998                                    &RQF_LDLM_CP_CALLBACK);
999         if (req == NULL)
1000                 RETURN(-ENOMEM);
1001
1002         /* server namespace, doesn't need lock */
1003         lvb_len = ldlm_lvbo_size(lock);
1004         /*
1005          * LU-3124 & LU-2187: do not return the layout in the completion AST
1006          * because it may deadlock (LU-2187), or the client may not have enough
1007          * space for a large layout. The layout will be returned to the client
1008          * with an extra RPC to fetch xattr.lov
1009          */
1010         if (ldlm_has_layout(lock))
1011                 lvb_len = 0;
1012
1013         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT, lvb_len);
1014         rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
1015         if (rc) {
1016                 ptlrpc_request_free(req);
1017                 RETURN(rc);
1018         }
1019
1020         ca = ptlrpc_req_async_args(ca, req);
1021         ca->ca_set_arg = arg;
1022         ca->ca_lock = lock;
1023
1024         req->rq_interpret_reply = ldlm_cb_interpret;
1025         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1026
1027         body->lock_handle[0] = lock->l_remote_handle;
1028         body->lock_flags = ldlm_flags_to_wire(flags);
1029         ldlm_lock2desc(lock, &body->lock_desc);
1030         if (lvb_len > 0) {
1031                 void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
1032                 lvb_len = ldlm_lvbo_fill(lock, lvb, &lvb_len);
1033                 if (lvb_len < 0) {
1034                         /*
1035                          * We still need to send the RPC to wake up the blocked
1036                          * enqueue thread on the client.
1037                          *
1038                          * For an old client there is no better way to notify it
1039                          * of the failure than to send a zero-sized LVB; the client
1040                          * will then fail out with -EPROTO.
1041                          */
1042                         req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, 0,
1043                                            RCL_CLIENT);
1044                         instant_cancel = 1;
1045                 } else {
1046                         req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, lvb_len,
1047                                            RCL_CLIENT);
1048                 }
1049         }
1050
1051         LDLM_DEBUG(lock, "server preparing completion AST");
1052
1053         ptlrpc_request_set_replen(req);
1054
1055         req->rq_send_state = LUSTRE_IMP_FULL;
1056         /* ptlrpc_request_pack already set timeout */
1057         if (AT_OFF)
1058                 req->rq_timeout = ldlm_get_rq_timeout();
1059
1060         /* We only send real blocking ASTs after the lock is granted */
1061         lock_res_and_lock(lock);
1062         if (ldlm_is_ast_sent(lock)) {
1063                 body->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
1064                 /* Copy AST flags like LDLM_FL_DISCARD_DATA. */
1065                 body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
1066                                                        LDLM_FL_AST_MASK);
1067
1068                 /*
1069                  * We might get here before ldlm_handle_enqueue() has set the
1070                  * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
1071                  * on the waiting list, but this is safe: similar code in
1072                  * ldlm_handle_enqueue() will still call ldlm_lock_cancel(),
1073                  * which not only cancels the lock but also removes it
1074                  * from the waiting list
1075                  */
1076                 if (ldlm_is_cancel_on_block(lock)) {
1077                         unlock_res_and_lock(lock);
1078                         ldlm_lock_cancel(lock);
1079
1080                         instant_cancel = 1;
1081                         req->rq_no_resend = 1;
1082
1083                         lock_res_and_lock(lock);
1084                 } else {
1085                         /* start the lock-timeout clock */
1086                         ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
1087                         /* Do not resend after lock callback timeout */
1088                         req->rq_delay_limit = ldlm_bl_timeout(lock);
1089                         req->rq_resend_cb = ldlm_update_resend;
1090                 }
1091         }
1092         unlock_res_and_lock(lock);
1093
1094         if (lock->l_export && lock->l_export->exp_nid_stats &&
1095             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1096                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1097                                      LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
1098
1099         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
1100
1101         RETURN(lvb_len < 0 ? lvb_len : rc);
1102 }
1103
1104 /**
1105  * Server side ->l_glimpse_ast handler for client locks.
1106  *
1107  * Sends glimpse AST to the client and waits for reply. Then updates
1108  * lvbo with the result.
1109  */
1110 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
1111 {
1112         struct ldlm_cb_set_arg *arg = data;
1113         struct ldlm_request *body;
1114         struct ptlrpc_request *req;
1115         struct ldlm_cb_async_args *ca;
1116         int rc;
1117         struct req_format *req_fmt;
1118
1119         ENTRY;
1120
1121         LASSERT(lock != NULL);
1122
1123         if (arg->gl_desc != NULL)
1124                 /* There is a glimpse descriptor to pack */
1125                 req_fmt = &RQF_LDLM_GL_CALLBACK_DESC;
1126         else
1127                 req_fmt = &RQF_LDLM_GL_CALLBACK;
1128
1129         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
1130                                         req_fmt, LUSTRE_DLM_VERSION,
1131                                         LDLM_GL_CALLBACK);
1132
1133         if (req == NULL)
1134                 RETURN(-ENOMEM);
1135
1136         if (arg->gl_desc != NULL) {
1137                 /* copy the GL descriptor */
1138                 union ldlm_gl_desc      *desc;
1139
1140                 desc = req_capsule_client_get(&req->rq_pill, &RMF_DLM_GL_DESC);
1141                 *desc = *arg->gl_desc;
1142         }
1143
1144         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1145         body->lock_handle[0] = lock->l_remote_handle;
1146         ldlm_lock2desc(lock, &body->lock_desc);
1147
1148         ca = ptlrpc_req_async_args(ca, req);
1149         ca->ca_set_arg = arg;
1150         ca->ca_lock = lock;
1151
1152         /* server namespace, doesn't need lock */
1153         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1154                              ldlm_lvbo_size(lock));
1155         ptlrpc_request_set_replen(req);
1156
1157         req->rq_send_state = LUSTRE_IMP_FULL;
1158         /* ptlrpc_request_alloc_pack already set timeout */
1159         if (AT_OFF)
1160                 req->rq_timeout = ldlm_get_rq_timeout();
1161
1162         req->rq_interpret_reply = ldlm_cb_interpret;
1163
1164         if (lock->l_export && lock->l_export->exp_nid_stats) {
1165                 struct nid_stat *nid_stats = lock->l_export->exp_nid_stats;
1166
1167                 lprocfs_counter_incr(nid_stats->nid_ldlm_stats,
1168                                      LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
1169         }
1170
1171         rc = ldlm_ast_fini(req, arg, lock, 0);
1172
1173         RETURN(rc);
1174 }
1175 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
1176
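/*
 * Send glimpse ASTs for all locks on the prepared gl_work_list and
 * reprocess the resource if any of them need to be restarted.
 */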
1177 int ldlm_glimpse_locks(struct ldlm_resource *res,
1178                        struct list_head *gl_work_list)
1179 {
1180         int rc;
1181
1182         ENTRY;
1183
1184         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
1185                                LDLM_WORK_GL_AST);
1186         if (rc == -ERESTART)
1187                 ldlm_reprocess_all(res, NULL);
1188
1189         RETURN(rc);
1190 }
1191 EXPORT_SYMBOL(ldlm_glimpse_locks);
1192
1193 /* return LDLM lock associated with a lock callback request */
1194 struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req)
1195 {
1196         struct ldlm_cb_async_args *ca;
1197         struct ldlm_lock *lock;
1198
1199         ENTRY;
1200
1201         ca = ptlrpc_req_async_args(ca, req);
1202         lock = ca->ca_lock;
1203         if (lock == NULL)
1204                 RETURN(ERR_PTR(-EFAULT));
1205
1206         RETURN(lock);
1207 }
1208 EXPORT_SYMBOL(ldlm_request_lock);
1209
1210 /**
1211  * Main server-side entry point into LDLM for enqueue. This is called by ptlrpc
1212  * service threads to carry out client lock enqueueing requests.
1213  */
1214 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
1215                          struct ptlrpc_request *req,
1216                          const struct ldlm_request *dlm_req,
1217                          const struct ldlm_callback_suite *cbs)
1218 {
1219         struct ldlm_reply *dlm_rep;
1220         __u64 flags;
1221         enum ldlm_error err = ELDLM_OK;
1222         struct ldlm_lock *lock = NULL;
1223         void *cookie = NULL;
1224         int rc = 0;
1225         struct ldlm_resource *res = NULL;
1226         const struct lu_env *env = req->rq_svc_thread->t_env;
1227
1228         ENTRY;
1229
1230         LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
1231
1232         ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF, LATF_SKIP);
1233         flags = ldlm_flags_from_wire(dlm_req->lock_flags);
1234
1235         LASSERT(req->rq_export);
1236
1237         /* for intent enqueue the stat will be updated inside intent policy */
1238         if (ptlrpc_req2svc(req)->srv_stats != NULL &&
1239             !(dlm_req->lock_flags & LDLM_FL_HAS_INTENT))
1240                 ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);
1241
1242         if (req->rq_export && req->rq_export->exp_nid_stats &&
1243             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1244                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1245                                      LDLM_ENQUEUE - LDLM_FIRST_OPC);
1246
1247         if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
1248                      dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
1249                 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
1250                           dlm_req->lock_desc.l_resource.lr_type);
1251                 GOTO(out, rc = -EFAULT);
1252         }
1253
1254         if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
1255                      dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
1256                      dlm_req->lock_desc.l_req_mode &
1257                      (dlm_req->lock_desc.l_req_mode-1))) {
1258                 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
1259                           dlm_req->lock_desc.l_req_mode);
1260                 GOTO(out, rc = -EFAULT);
1261         }
1262
1263         if (unlikely((flags & LDLM_FL_REPLAY) ||
1264                      (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))) {
1265                 /* Find an existing lock in the per-export lock hash */
1266                 /*
1267                  * In the function below, .hs_keycmp resolves to
1268                  * ldlm_export_lock_keycmp()
1269                  */
1270                 /* coverity[overrun-buffer-val] */
1271                 lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
1272                                        (void *)&dlm_req->lock_handle[0]);
1273                 if (lock != NULL) {
1274                         DEBUG_REQ(D_DLMTRACE, req,
1275                                   "found existing lock cookie %#llx",
1276                                   lock->l_handle.h_cookie);
1277                         flags |= LDLM_FL_RESENT;
1278                         GOTO(existing_lock, rc = 0);
1279                 }
1280         } else {
1281                 if (ldlm_reclaim_full()) {
1282                         DEBUG_REQ(D_DLMTRACE, req,
1283                                   "Too many granted locks, reject current enqueue request and let the client retry later");
1284                         GOTO(out, rc = -EINPROGRESS);
1285                 }
1286         }
1287
1288         /* The lock's callback data might be set in the policy function */
1289         lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
1290                                 dlm_req->lock_desc.l_resource.lr_type,
1291                                 dlm_req->lock_desc.l_req_mode,
1292                                 cbs, NULL, 0, LVB_T_NONE);
1293         if (IS_ERR(lock)) {
1294                 rc = PTR_ERR(lock);
1295                 lock = NULL;
1296                 GOTO(out, rc);
1297         }
1298
1299         lock->l_remote_handle = dlm_req->lock_handle[0];
1300         LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
1301
1302         /*
1303          * Initialize the resource LVB, but not for a lock being replayed,
1304          * since the client already received the LVB in that case.
1305          * This must occur early since some policy methods assume resource
1306          * lvb is available (lr_lvb_data != NULL).
1307          */
1308         res = lock->l_resource;
1309         if (!(flags & LDLM_FL_REPLAY)) {
1310                 /* non-replayed lock, delayed lvb init may need to be done */
1311                 rc = ldlm_lvbo_init(res);
1312                 if (rc < 0) {
1313                         LDLM_DEBUG(lock, "delayed lvb init failed (rc %d)", rc);
1314                         GOTO(out, rc);
1315                 }
1316         }
1317
1318         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
1319         /*
1320          * Don't enqueue a lock onto the export if it has been disconnected
1321          * due to eviction (b=3822) or server umount (b=24324).
1322          * Cancel it now instead.
1323          */
1324         if (req->rq_export->exp_disconnected) {
1325                 LDLM_ERROR(lock, "lock on disconnected export %p",
1326                            req->rq_export);
1327                 GOTO(out, rc = -ENOTCONN);
1328         }
1329
1330         lock->l_export = class_export_lock_get(req->rq_export, lock);
1331         if (lock->l_export->exp_lock_hash)
1332                 cfs_hash_add(lock->l_export->exp_lock_hash,
1333                              &lock->l_remote_handle,
1334                              &lock->l_exp_hash);
1335
1336         /*
1337          * Inherit the enqueue flags before the operation, because we do not
1338          * keep the res lock on return and next operations (BL AST) may proceed
1339          * without them.
1340          */
1341         lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
1342                                               LDLM_FL_INHERIT_MASK);
1343
1344         ldlm_convert_policy_to_local(req->rq_export,
1345                                      dlm_req->lock_desc.l_resource.lr_type,
1346                                      &dlm_req->lock_desc.l_policy_data,
1347                                      &lock->l_policy_data);
1348         if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
1349                 lock->l_req_extent = lock->l_policy_data.l_extent;
1350
1351 existing_lock:
1352         if (flags & LDLM_FL_HAS_INTENT) {
1353                 /*
1354                  * In this case, the reply buffer is allocated deep in
1355                  * local_lock_enqueue by the policy function.
1356                  */
1357                 cookie = req;
1358         } else {
1359                 /*
1360                  * based on the assumption that the LVB size never changes during
1361                  * the resource lifetime; otherwise it would need resource->lr_lock
1362                  * protection
1363                  */
1364                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
1365                                      RCL_SERVER, ldlm_lvbo_size(lock));
1366
1367                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
1368                         GOTO(out, rc = -ENOMEM);
1369
1370                 rc = req_capsule_server_pack(&req->rq_pill);
1371                 if (rc)
1372                         GOTO(out, rc);
1373         }
1374
1375         err = ldlm_lock_enqueue(env, ns, &lock, cookie, &flags);
1376         if (err) {
1377                 if ((int)err < 0)
1378                         rc = (int)err;
1379                 GOTO(out, err);
1380         }
1381
1382         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1383
1384         ldlm_lock2desc(lock, &dlm_rep->lock_desc);
1385         ldlm_lock2handle(lock, &dlm_rep->lock_handle);
1386
1387         if (lock && lock->l_resource->lr_type == LDLM_EXTENT)
1388                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_BL_EVICT, 6);
1389
1390         /*
1391          * We never send a blocking AST until the lock is granted, but
1392          * we can tell the client about it right now
1393          */
1394         lock_res_and_lock(lock);
1395
1396         /*
1397          * Now take into account flags to be inherited from original lock
1398          * request both in reply to client and in our own lock flags.
1399          */
1400         dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
1401         lock->l_flags |= flags & LDLM_FL_INHERIT_MASK;
1402
1403         /*
1404          * Don't move a pending lock onto the export if it has already been
1405          * disconnected due to eviction (b=5683) or server umount (b=24324).
1406          * Cancel it now instead.
1407          */
1408         if (unlikely(req->rq_export->exp_disconnected ||
1409                      OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
1410                 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
1411                 rc = -ENOTCONN;
1412         } else if (ldlm_is_ast_sent(lock)) {
1413                 /* fill lock desc for possible lock convert */
1414                 if (lock->l_blocking_lock &&
1415                     lock->l_resource->lr_type == LDLM_IBITS) {
1416                         struct ldlm_lock *bl_lock = lock->l_blocking_lock;
1417                         struct ldlm_lock_desc *rep_desc = &dlm_rep->lock_desc;
1418
1419                         LDLM_DEBUG(lock,
1420                                    "save blocking bits %llx in granted lock",
1421                                    bl_lock->l_policy_data.l_inodebits.bits);
1422                         /*
1423                          * If the lock is blocked then save the blocking ibits
1424                          * in the returned lock policy for a possible lock
1425                          * convert on the client.
1426                          */
1427                         rep_desc->l_policy_data.l_inodebits.cancel_bits =
1428                                 bl_lock->l_policy_data.l_inodebits.bits;
1429                 }
1430                 dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
1431                 if (ldlm_is_granted(lock)) {
1432                         /*
1433                          * Only cancel the lock here if it was granted; otherwise
1434                          * it would be destroyed immediately and never granted in
1435                          * the future, causing timeouts on the client.  A lock that
1436                          * is not yet granted will be cancelled immediately after
1437                          * sending the completion AST.
1438                          */
1439                         if (ldlm_is_cancel_on_block(lock)) {
1440                                 unlock_res_and_lock(lock);
1441                                 ldlm_lock_cancel(lock);
1442                                 lock_res_and_lock(lock);
1443                         } else {
1444                                 ldlm_add_waiting_lock(lock,
1445                                                       ldlm_bl_timeout(lock));
1446                         }
1447                 }
1448         }
1449         unlock_res_and_lock(lock);
1450
1451         EXIT;
1452 out:
1453         req->rq_status = rc ?: err; /* return either error - b=11190 */
1454         if (!req->rq_packed_final) {
1455                 err = lustre_pack_reply(req, 1, NULL, NULL);
1456                 if (rc == 0)
1457                         rc = err;
1458         }
1459
1460         /*
1461          * The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1462          * ldlm_reprocess_all.  If this moves, revisit that code. -phil
1463          */
1464         if (lock != NULL) {
1465                 LDLM_DEBUG(lock,
1466                            "server-side enqueue handler, sending reply (err=%d, rc=%d)",
1467                            err, rc);
1468
1469                 if (rc == 0 &&
1470                     req_capsule_has_field(&req->rq_pill, &RMF_DLM_LVB,
1471                                           RCL_SERVER) &&
1472                     ldlm_lvbo_size(lock) > 0) {
1473                         void *buf;
1474                         int buflen;
1475
1476 retry:
1477                         buf = req_capsule_server_get(&req->rq_pill,
1478                                                      &RMF_DLM_LVB);
1479                         LASSERTF(buf != NULL, "req %p, lock %p\n", req, lock);
1480                         buflen = req_capsule_get_size(&req->rq_pill,
1481                                         &RMF_DLM_LVB, RCL_SERVER);
1482                         /*
1483                          * For a non-replayed lock, delayed LVB init may
1484                          * need to occur now.
1485                          */
1486                         if ((buflen > 0) && !(flags & LDLM_FL_REPLAY)) {
1487                                 int rc2;
1488
1489                                 rc2 = ldlm_lvbo_fill(lock, buf, &buflen);
1490                                 if (rc2 >= 0) {
1491                                         req_capsule_shrink(&req->rq_pill,
1492                                                            &RMF_DLM_LVB,
1493                                                            rc2, RCL_SERVER);
1494                                 } else if (rc2 == -ERANGE) {
1495                                         rc2 = req_capsule_server_grow(
1496                                                         &req->rq_pill,
1497                                                         &RMF_DLM_LVB, buflen);
1498                                         if (!rc2) {
1499                                                 goto retry;
1500                                         } else {
1501                                                 /*
1502                                                  * if we can't grow the buffer,
1503                                                  * it's ok to return empty lvb
1504                                                  * to client.
1505                                                  */
1506                                                 req_capsule_shrink(
1507                                                         &req->rq_pill,
1508                                                         &RMF_DLM_LVB, 0,
1509                                                         RCL_SERVER);
1510                                         }
1511                                 } else {
1512                                         rc = rc2;
1513                                 }
1514                         } else if (flags & LDLM_FL_REPLAY) {
1515                                 /* no LVB resend upon replay */
1516                                 if (buflen > 0)
1517                                         req_capsule_shrink(&req->rq_pill,
1518                                                            &RMF_DLM_LVB,
1519                                                            0, RCL_SERVER);
1520                                 else
1521                                         rc = buflen;
1522                         } else {
1523                                 rc = buflen;
1524                         }
1525                 }
1526
1527                 if (rc != 0 && !(flags & LDLM_FL_RESENT)) {
1528                         if (lock->l_export) {
1529                                 ldlm_lock_cancel(lock);
1530                         } else {
1531                                 lock_res_and_lock(lock);
1532                                 ldlm_resource_unlink_lock(lock);
1533                                 ldlm_lock_destroy_nolock(lock);
1534                                 unlock_res_and_lock(lock);
1536                         }
1537                 }
1538
1539                 if (!err && !ldlm_is_cbpending(lock) &&
1540                     dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1541                         ldlm_reprocess_all(lock->l_resource, lock);
1542
1543                 LDLM_LOCK_RELEASE(lock);
1544         }
1545
1546         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1547                           lock, rc);
1548
1549         return rc;
1550 }
1551
1552 /*
1553  * Clear the blocking lock.  A race is possible between ldlm_handle_convert0()
1554  * and ldlm_work_bl_ast_lock(), so this is done under the lock with a NULL check.
1555  */
1556 void ldlm_clear_blocking_lock(struct ldlm_lock *lock)
1557 {
1558         if (lock->l_blocking_lock) {
1559                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
1560                 lock->l_blocking_lock = NULL;
1561         }
1562 }
1563
1564 /* A lock can be converted to new ibits or mode and should be considered
1565  * a new lock. Clear all state related to a previous blocking AST
1566  * processing so new conflicts will cause new blocking ASTs.
1567  *
1568  * This is used during lock convert below and lock downgrade to COS mode in
1569  * ldlm_lock_mode_downgrade().
1570  */
1571 void ldlm_clear_blocking_data(struct ldlm_lock *lock)
1572 {
1573         ldlm_clear_ast_sent(lock);
1574         lock->l_bl_ast_run = 0;
1575         ldlm_clear_blocking_lock(lock);
1576 }
1577
1578 /**
1579  * Main LDLM entry point for server code to process lock conversion requests.
1580  */
1581 int ldlm_handle_convert0(struct ptlrpc_request *req,
1582                          const struct ldlm_request *dlm_req)
1583 {
1584         struct obd_export *exp = req->rq_export;
1585         struct ldlm_reply *dlm_rep;
1586         struct ldlm_lock *lock;
1587         int rc;
1588
1589         ENTRY;
1590
1591         if (exp && exp->exp_nid_stats && exp->exp_nid_stats->nid_ldlm_stats)
1592                 lprocfs_counter_incr(exp->exp_nid_stats->nid_ldlm_stats,
1593                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1594
1595         rc = req_capsule_server_pack(&req->rq_pill);
1596         if (rc)
1597                 RETURN(rc);
1598
1599         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1600         dlm_rep->lock_flags = dlm_req->lock_flags;
1601
1602         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1603         if (lock) {
1604                 __u64 bits;
1605                 __u64 new;
1606
1607                 bits = lock->l_policy_data.l_inodebits.bits;
1608                 new = dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1609                 LDLM_DEBUG(lock, "server-side convert handler START");
1610
1611                 if (ldlm_is_cancel(lock)) {
1612                         LDLM_ERROR(lock, "convert on canceled lock!");
1613                         rc = ELDLM_NO_LOCK_DATA;
1614                 } else if (dlm_req->lock_desc.l_req_mode !=
1615                            lock->l_granted_mode) {
1616                         LDLM_ERROR(lock, "lock mode differs!");
1617                         rc = ELDLM_NO_LOCK_DATA;
1618                 } else if (bits == new) {
1619                         /*
1620                          * This can be a valid situation if CONVERT RPCs are
1621                          * re-ordered. Just finish silently.
1622                          */
1623                         LDLM_DEBUG(lock, "lock is converted already!");
1624                         rc = ELDLM_OK;
1625                 } else {
1626                         lock_res_and_lock(lock);
1627                         if (ldlm_is_waited(lock))
1628                                 ldlm_del_waiting_lock(lock);
1629
1630                         ldlm_clear_cbpending(lock);
1631                         lock->l_policy_data.l_inodebits.cancel_bits = 0;
1632                         ldlm_inodebits_drop(lock, bits & ~new);
1633
1634                         ldlm_clear_blocking_data(lock);
1635                         unlock_res_and_lock(lock);
1636
1637                         ldlm_reprocess_all(lock->l_resource, NULL);
1638                         rc = ELDLM_OK;
1639                 }
1640
1641                 if (rc == ELDLM_OK) {
1642                         dlm_rep->lock_handle = lock->l_remote_handle;
1643                         ldlm_ibits_policy_local_to_wire(&lock->l_policy_data,
1644                                         &dlm_rep->lock_desc.l_policy_data);
1645                 }
1646
1647                 LDLM_DEBUG(lock, "server-side convert handler END, rc = %d",
1648                            rc);
1649                 LDLM_LOCK_PUT(lock);
1650         } else {
1651                 rc = ELDLM_NO_LOCK_DATA;
1652                 LDLM_DEBUG_NOLOCK("server-side convert handler END, rc = %d",
1653                                   rc);
1654         }
1655
1656         req->rq_status = rc;
1657
1658         RETURN(0);
1659 }
1660
1661 /**
1662  * Cancel all the locks whose handles are packed into ldlm_request
1663  *
1664  * Called by server code expecting such combined cancel activity
1665  * requests.
1666  */
1667 int ldlm_request_cancel(struct ptlrpc_request *req,
1668                         const struct ldlm_request *dlm_req,
1669                         int first, enum lustre_at_flags flags)
1670 {
1671         struct ldlm_resource *res, *pres = NULL;
1672         struct ldlm_lock *lock;
1673         int i, count, done = 0;
1674         unsigned int size;
1675
1676         ENTRY;
1677
1678         size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT);
1679         if (size <= offsetof(struct ldlm_request, lock_handle) ||
1680             (size - offsetof(struct ldlm_request, lock_handle)) /
1681              sizeof(struct lustre_handle) < dlm_req->lock_count)
1682                 RETURN(0);
1683
1684         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1685         if (first >= count)
1686                 RETURN(0);
1687
1688         if (count == 1 && dlm_req->lock_handle[0].cookie == 0)
1689                 RETURN(0);
1690
1691         /*
1692          * There are no locks on the server at replay time, so
1693          * skip lock cancelling to make replay tests pass.
1694          */
1695         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1696                 RETURN(0);
1697
1698         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, starting at %d",
1699                           count, first);
1700
1701         for (i = first; i < count; i++) {
1702                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1703                 if (!lock) {
1704                         /* below message checked in replay-single.sh test_36 */
1705                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock (cookie %llu)",
1706                                           dlm_req->lock_handle[i].cookie);
1707                         continue;
1708                 }
1709
1710                 res = lock->l_resource;
1711                 done++;
1712
1713                 /*
1714                  * This code is an optimization to only attempt lock
1715                  * granting on the resource (which could be CPU-expensive)
1716                  * after we are done cancelling locks in that resource.
1717                  */
1718                 if (res != pres) {
1719                         if (pres != NULL) {
1720                                 ldlm_reprocess_all(pres, NULL);
1721                                 LDLM_RESOURCE_DELREF(pres);
1722                                 ldlm_resource_putref(pres);
1723                         }
1724                         if (res != NULL) {
1725                                 ldlm_resource_getref(res);
1726                                 LDLM_RESOURCE_ADDREF(res);
1727
1728                                 if (!ldlm_is_discard_data(lock))
1729                                         ldlm_lvbo_update(res, lock,
1730                                                          NULL, 1);
1731                         }
1732                         pres = res;
1733                 }
1734
1735                 if ((flags & LATF_STATS) && ldlm_is_ast_sent(lock) &&
1736                     lock->l_blast_sent != 0) {
1737                         time64_t delay = ktime_get_real_seconds() -
1738                                          lock->l_blast_sent;
1739                         LDLM_DEBUG(lock,
1740                                    "server cancels blocked lock after %llds",
1741                                    (s64)delay);
1742                         at_measured(&lock->l_export->exp_bl_lock_at, delay);
1743                 }
1744                 ldlm_lock_cancel(lock);
1745                 LDLM_LOCK_PUT(lock);
1746         }
1747         if (pres != NULL) {
1748                 ldlm_reprocess_all(pres, NULL);
1749                 LDLM_RESOURCE_DELREF(pres);
1750                 ldlm_resource_putref(pres);
1751         }
1752         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1753         RETURN(done);
1754 }
1755 EXPORT_SYMBOL(ldlm_request_cancel);
1756
1757 /**
1758  * Main LDLM entry point for server code to cancel locks.
1759  *
1760  * Typically gets called from service handler on LDLM_CANCEL opc.
1761  */
1762 int ldlm_handle_cancel(struct ptlrpc_request *req)
1763 {
1764         struct ldlm_request *dlm_req;
1765         int rc;
1766
1767         ENTRY;
1768
1769         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1770         if (dlm_req == NULL) {
1771                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1772                 RETURN(-EFAULT);
1773         }
1774
1775         if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) <
1776             offsetof(struct ldlm_request, lock_handle[1]))
1777                 RETURN(-EPROTO);
1778
1779         if (req->rq_export && req->rq_export->exp_nid_stats &&
1780             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1781                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1782                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1783
1784         rc = req_capsule_server_pack(&req->rq_pill);
1785         if (rc)
1786                 RETURN(rc);
1787
1788         if (!ldlm_request_cancel(req, dlm_req, 0, LATF_STATS))
1789                 req->rq_status = LUSTRE_ESTALE;
1790
1791         RETURN(ptlrpc_reply(req));
1792 }
1793 #endif /* HAVE_SERVER_SUPPORT */
1794
1795 /**
1796  * Callback handler for receiving incoming blocking ASTs.
1797  *
1798  * This can only happen on the client side.
1799  */
1800 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1801                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1802 {
1803         int do_ast;
1804
1805         ENTRY;
1806
1807         LDLM_DEBUG(lock, "client blocking AST callback handler");
1808
1809         lock_res_and_lock(lock);
1810
1811         /* set bits to cancel for this lock for possible lock convert */
1812         if (ns_is_client(ns) && (lock->l_resource->lr_type == LDLM_IBITS)) {
1813                 /*
1814                  * The lock description contains the policy of the blocking
1815                  * lock, and its cancel_bits field passes the conflicting bits.
1816                  * NOTE: ld can be NULL, or non-NULL but zeroed when passed
1817                  * from ldlm_bl_thread_blwi(), so the bits in ld are checked
1818                  * below to make sure it is a valid description.
1819                  *
1820                  * Since the server may replace the lock resource while keeping
1821                  * the same cookie, never use cancel bits from a different
1822                  * resource; a full cancel is to be used instead.
1823                  */
1824                 if (ld && ld->l_policy_data.l_inodebits.bits &&
1825                     ldlm_res_eq(&ld->l_resource.lr_name,
1826                                 &lock->l_resource->lr_name))
1827                         lock->l_policy_data.l_inodebits.cancel_bits =
1828                                 ld->l_policy_data.l_inodebits.cancel_bits;
1829                 /*
1830                  * If there is no valid ld and the lock is already cbpending,
1831                  * then cancel_bits should be kept; otherwise they are zeroed.
1832                  */
1833                 else if (!ldlm_is_cbpending(lock))
1834                         lock->l_policy_data.l_inodebits.cancel_bits = 0;
1835         }
1836         ldlm_set_cbpending(lock);
1837
1838         do_ast = (!lock->l_readers && !lock->l_writers);
1839         unlock_res_and_lock(lock);
1840
1841         if (do_ast) {
1842                 CDEBUG(D_DLMTRACE,
1843                        "Lock %p already unused, calling callback (%p)\n",
1844                        lock, lock->l_blocking_ast);
1845                 if (lock->l_blocking_ast != NULL)
1846                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1847                                              LDLM_CB_BLOCKING);
1848         } else {
1849                 CDEBUG(D_DLMTRACE,
1850                        "Lock %p is referenced, will be cancelled later\n",
1851                        lock);
1852         }
1853
1854         LDLM_DEBUG(lock, "client blocking callback handler END");
1855         LDLM_LOCK_RELEASE(lock);
1856         EXIT;
1857 }
1858
1859 /**
1860  * Callback handler for receiving incoming completion ASTs.
1861  *
1862  * This can only happen on the client side.
1863  */
1864 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1865                                     struct ldlm_namespace *ns,
1866                                     struct ldlm_request *dlm_req,
1867                                     struct ldlm_lock *lock)
1868 {
1869         struct list_head ast_list;
1870         int lvb_len;
1871         int rc = 0;
1872
1873         ENTRY;
1874
1875         LDLM_DEBUG(lock, "client completion callback handler START");
1876
1877         INIT_LIST_HEAD(&ast_list);
1878         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1879                 long to = cfs_time_seconds(1);
1880
1881                 while (to > 0) {
1882                         set_current_state(TASK_INTERRUPTIBLE);
1883                         schedule_timeout(to);
1884                         if (ldlm_is_granted(lock) ||
1885                             ldlm_is_destroyed(lock))
1886                                 break;
1887                 }
1888         }
1889
1890         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1891         if (lvb_len < 0) {
1892                 LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
1893                 GOTO(out, rc = lvb_len);
1894         } else if (lvb_len > 0) {
1895                 if (lock->l_lvb_len > 0) {
1896                         /* for extent lock, lvb contains ost_lvb{}. */
1897                         LASSERT(lock->l_lvb_data != NULL);
1898
1899                         if (unlikely(lock->l_lvb_len < lvb_len)) {
1900                                 LDLM_ERROR(lock,
1901                                            "Replied LVB is larger than expectation, expected = %d, replied = %d",
1902                                            lock->l_lvb_len, lvb_len);
1903                                 GOTO(out, rc = -EINVAL);
1904                         }
1905                 }
1906         }
1907
1908         lock_res_and_lock(lock);
1909
1910         if (!ldlm_res_eq(&dlm_req->lock_desc.l_resource.lr_name,
1911                          &lock->l_resource->lr_name)) {
1912                 ldlm_resource_unlink_lock(lock);
1913                 unlock_res_and_lock(lock);
1914                 rc = ldlm_lock_change_resource(ns, lock,
1915                                 &dlm_req->lock_desc.l_resource.lr_name);
1916                 if (rc < 0) {
1917                         LDLM_ERROR(lock, "Failed to allocate resource");
1918                         GOTO(out, rc);
1919                 }
1920                 LDLM_DEBUG(lock, "completion AST, new resource");
1921                 lock_res_and_lock(lock);
1922         }
1923
1924         if (ldlm_is_destroyed(lock) ||
1925             ldlm_is_granted(lock)) {
1926                 /* b=11300: the lock has already been granted */
1927                 unlock_res_and_lock(lock);
1928                 LDLM_DEBUG(lock, "Double grant race happened");
1929                 GOTO(out, rc = 0);
1930         }
1931
1932         /*
1933          * If we receive the completion AST before the actual enqueue returned,
1934          * then we might need to switch lock modes, resources, or extents.
1935          */
1936         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1937                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1938                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1939         }
1940
1941         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1942                 ldlm_convert_policy_to_local(req->rq_export,
1943                                           dlm_req->lock_desc.l_resource.lr_type,
1944                                           &dlm_req->lock_desc.l_policy_data,
1945                                           &lock->l_policy_data);
1946                 LDLM_DEBUG(lock, "completion AST, new policy data");
1947         }
1948
1949         ldlm_resource_unlink_lock(lock);
1950
1951         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1952                 /*
1953                  * BL_AST locks are not needed in LRU.
1954                  * Let ldlm_cancel_lru() be fast.
1955                  */
1956                 ldlm_lock_remove_from_lru(lock);
1957                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1958                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1959         }
1960
1961         if (lock->l_lvb_len > 0) {
1962                 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
1963                                    lock->l_lvb_data, lvb_len);
1964                 if (rc < 0) {
1965                         unlock_res_and_lock(lock);
1966                         GOTO(out, rc);
1967                 }
1968         }
1969
1970         ldlm_grant_lock(lock, &ast_list);
1971         unlock_res_and_lock(lock);
1972
1973         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1974
1975         /*
1976          * Let the enqueue path call osc_lock_upcall() and initialize
1977          * l_ast_data.
1978          */
1979         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1980
1981         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1982
1983         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1984                           lock);
1985         GOTO(out, rc);
1986
1987 out:
1988         if (rc < 0) {
1989                 lock_res_and_lock(lock);
1990                 ldlm_set_failed(lock);
1991                 unlock_res_and_lock(lock);
1992                 wake_up(&lock->l_waitq);
1993         }
1994         LDLM_LOCK_RELEASE(lock);
1995 }
1996
1997 /**
1998  * Callback handler for receiving incoming glimpse ASTs.
1999  *
2000  * This can only happen on the client side.  After handling the glimpse AST
2001  * we also consider dropping the lock here if it is unused locally for a
2002  * long time.
2003  */
2004 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
2005                                     struct ldlm_namespace *ns,
2006                                     struct ldlm_request *dlm_req,
2007                                     struct ldlm_lock *lock)
2008 {
2009         int rc = -ENOSYS;
2010
2011         ENTRY;
2012
2013         LDLM_DEBUG(lock, "client glimpse AST callback handler");
2014
2015         if (lock->l_glimpse_ast != NULL)
2016                 rc = lock->l_glimpse_ast(lock, req);
2017
2018         if (req->rq_repmsg != NULL) {
2019                 ptlrpc_reply(req);
2020         } else {
2021                 req->rq_status = rc;
2022                 ptlrpc_error(req);
2023         }
2024
2025         lock_res_and_lock(lock);
2026         if (lock->l_granted_mode == LCK_PW &&
2027             !lock->l_readers && !lock->l_writers &&
2028             ktime_after(ktime_get(),
2029                         ktime_add(lock->l_last_used,
2030                                   ktime_set(ns->ns_dirty_age_limit, 0)))) {
2031                 unlock_res_and_lock(lock);
2032                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
2033                         ldlm_handle_bl_callback(ns, NULL, lock);
2034
2035                 EXIT;
2036                 return;
2037         }
2038         unlock_res_and_lock(lock);
2039         LDLM_LOCK_RELEASE(lock);
2040         EXIT;
2041 }
2042
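     /*
      * Send a reply carrying status \a rc back to the client: pack the
      * reply buffer if it has not been packed yet, then call
      * ptlrpc_reply().  Nothing is sent if the request was marked no-reply.
      */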
2043 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
2044 {
2045         if (req->rq_no_reply)
2046                 return 0;
2047
2048         req->rq_status = rc;
2049         if (!req->rq_packed_final) {
2050                 rc = lustre_pack_reply(req, 1, NULL, NULL);
2051                 if (rc)
2052                         return rc;
2053         }
2054         return ptlrpc_reply(req);
2055 }
2056
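     /*
      * Queue a blocking work item for the blocking threads.  Items for
      * LDLM_FL_DISCARD_DATA locks go to the priority list, everything else
      * to the regular list.  Unless LCF_ASYNC is set, wait for a blocking
      * thread to complete the item before returning.
      */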
2057 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
2058                                enum ldlm_cancel_flags cancel_flags)
2059 {
2060         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
2061
2062         ENTRY;
2063
2064         spin_lock(&blp->blp_lock);
2065         if (blwi->blwi_lock &&
2066             ldlm_is_discard_data(blwi->blwi_lock)) {
2067                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
2068                 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
2069         } else {
2070                 /* other blocking callbacks are added to the regular list */
2071                 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
2072         }
2073         spin_unlock(&blp->blp_lock);
2074
2075         wake_up(&blp->blp_waitq);
2076
2077         /*
2078          * We cannot check blwi->blwi_flags as blwi could already be freed in
2079          * LCF_ASYNC mode.
2080          */
2081         if (!(cancel_flags & LCF_ASYNC))
2082                 wait_for_completion(&blwi->blwi_comp);
2083
2084         RETURN(0);
2085 }
2086
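     /*
      * Initialize a blocking work item either with the list of \a count
      * locks to cancel (moved over from \a cancels) or, if \a count is
      * zero, with the single lock \a lock.
      */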
2087 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
2088                              struct ldlm_namespace *ns,
2089                              struct ldlm_lock_desc *ld,
2090                              struct list_head *cancels, int count,
2091                              struct ldlm_lock *lock,
2092                              enum ldlm_cancel_flags cancel_flags)
2093 {
2094         init_completion(&blwi->blwi_comp);
2095         INIT_LIST_HEAD(&blwi->blwi_head);
2096
2097         if (memory_pressure_get())
2098                 blwi->blwi_mem_pressure = 1;
2099
2100         blwi->blwi_ns = ns;
2101         blwi->blwi_flags = cancel_flags;
2102         if (ld != NULL)
2103                 blwi->blwi_ld = *ld;
2104         if (count) {
2105                 list_add(&blwi->blwi_head, cancels);
2106                 list_del_init(cancels);
2107                 blwi->blwi_count = count;
2108         } else {
2109                 blwi->blwi_lock = lock;
2110         }
2111 }
2112
2113 /**
2114  * Queues a list of locks \a cancels containing \a count locks
2115  * for later processing by a blocking thread.  If \a count is zero,
2116  * then the lock referenced as \a lock is queued instead.
2117  *
2118  * The blocking thread will then call the ->l_blocking_ast callback of the lock.
2119  * If queueing fails, an error is returned and the caller is supposed to
2120  * call ->l_blocking_ast itself.
2121  */
2122 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
2123                              struct ldlm_lock_desc *ld,
2124                              struct ldlm_lock *lock,
2125                              struct list_head *cancels, int count,
2126                              enum ldlm_cancel_flags cancel_flags)
2127 {
2128         ENTRY;
2129
2130         if (cancels && count == 0)
2131                 RETURN(0);
2132
2133         if (cancel_flags & LCF_ASYNC) {
2134                 struct ldlm_bl_work_item *blwi;
2135
2136                 OBD_ALLOC(blwi, sizeof(*blwi));
2137                 if (blwi == NULL)
2138                         RETURN(-ENOMEM);
2139                 init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
2140
2141                 RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
2142         } else {
2143                 /*
2144                  * For a synchronous call do minimal memory allocation, as it
2145                  * could be triggered from the kernel shrinker.
2146                  */
2147                 struct ldlm_bl_work_item blwi;
2148
2149                 memset(&blwi, 0, sizeof(blwi));
2150                 init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
2151                 RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
2152         }
2153 }
2154
2155
2156 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
2157                            struct ldlm_lock *lock)
2158 {
2159         return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
2160 }
2161
2162 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
2163                            struct list_head *cancels, int count,
2164                            enum ldlm_cancel_flags cancel_flags)
2165 {
2166         return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
2167 }
2168
2169 int ldlm_bl_thread_wakeup(void)
2170 {
2171         wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
2172         return 0;
2173 }
2174
2175 /* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
2176 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
2177 {
2178         struct obd_device *obd = req->rq_export->exp_obd;
2179         char *key;
2180         void *val;
2181         int keylen, vallen;
2182         int rc = -ENOSYS;
2183
2184         ENTRY;
2185
2186         DEBUG_REQ(D_HSM, req, "%s: handle setinfo", obd->obd_name);
2187
2188         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
2189
2190         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2191         if (key == NULL) {
2192                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
2193                 RETURN(-EFAULT);
2194         }
2195         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
2196                                       RCL_CLIENT);
2197         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
2198         if (val == NULL) {
2199                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
2200                 RETURN(-EFAULT);
2201         }
2202         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
2203                                       RCL_CLIENT);
2204
2205         /* We are responsible for swabbing contents of val */
2206
2207         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
2208                 /* Pass it on to mdc (the "export" in this case) */
2209                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
2210                                         req->rq_export,
2211                                         sizeof(KEY_HSM_COPYTOOL_SEND),
2212                                         KEY_HSM_COPYTOOL_SEND,
2213                                         vallen, val, NULL);
2214         else
2215                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key '%s'", key);
2216
2217         return rc;
2218 }
2219
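     /* Log the outcome of a callback reply, warning if it was not sent or failed. */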
2220 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
2221                                         const char *msg, int rc,
2222                                         const struct lustre_handle *handle)
2223 {
2224         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
2225                   "%s, NID=%s lock=%#llx: rc = %d",
2226                   msg, libcfs_id2str(req->rq_peer),
2227                   handle ? handle->cookie : 0, rc);
2228         if (req->rq_no_reply)
2229                 CWARN("No reply was sent, maybe cause b=21636.\n");
2230         else if (rc)
2231                 CWARN("Send reply failed, maybe cause b=21636.\n");
2232 }
2233
2234 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
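     /*
      * Main handler for LDLM callback requests arriving on the client:
      * blocking, completion and glimpse ASTs, plus LDLM_SET_INFO.
      */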
2235 static int ldlm_callback_handler(struct ptlrpc_request *req)
2236 {
2237         struct ldlm_namespace *ns;
2238         struct ldlm_request *dlm_req;
2239         struct ldlm_lock *lock;
2240         int rc;
2241
2242         ENTRY;
2243
2244         /*
2245          * Requests arrive in sender's byte order.  The ptlrpc service
2246          * handler has already checked and, if necessary, byte-swapped the
2247          * incoming request message body, but I am responsible for the
2248          * message buffers.
2249          */
2250
2251         /* do nothing for sec context finalize */
2252         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
2253                 RETURN(0);
2254
2255         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2256
2257         if (req->rq_export == NULL) {
2258                 rc = ldlm_callback_reply(req, -ENOTCONN);
2259                 ldlm_callback_errmsg(req, "Operate on unconnected server",
2260                                      rc, NULL);
2261                 RETURN(0);
2262         }
2263
2264         LASSERT(req->rq_export != NULL);
2265         LASSERT(req->rq_export->exp_obd != NULL);
2266
2267         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2268         case LDLM_BL_CALLBACK:
2269                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) {
2270                         if (cfs_fail_err)
2271                                 ldlm_callback_reply(req, -(int)cfs_fail_err);
2272                         RETURN(0);
2273                 }
2274                 break;
2275         case LDLM_CP_CALLBACK:
2276                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
2277                         RETURN(0);
2278                 break;
2279         case LDLM_GL_CALLBACK:
2280                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
2281                         RETURN(0);
2282                 break;
2283         case LDLM_SET_INFO:
2284                 rc = ldlm_handle_setinfo(req);
2285                 ldlm_callback_reply(req, rc);
2286                 RETURN(0);
2287         default:
2288                 CERROR("unknown opcode %u\n",
2289                        lustre_msg_get_opc(req->rq_reqmsg));
2290                 ldlm_callback_reply(req, -EPROTO);
2291                 RETURN(0);
2292         }
2293
2294         ns = req->rq_export->exp_obd->obd_namespace;
2295         LASSERT(ns != NULL);
2296
2297         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2298
2299         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2300         if (dlm_req == NULL) {
2301                 rc = ldlm_callback_reply(req, -EPROTO);
2302                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2303                                      NULL);
2304                 RETURN(0);
2305         }
2306
2307         /*
2308          * Force a known safe race, send a cancel to the server for a lock
2309          * which the server has already started a blocking callback on.
2310          */
2311         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2312             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2313                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
2314                 if (rc < 0)
2315                         CERROR("ldlm_cli_cancel: %d\n", rc);
2316         }
2317
2318         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2319         if (!lock) {
2320                 CDEBUG(D_DLMTRACE,
2321                        "callback on lock %#llx - lock disappeared\n",
2322                        dlm_req->lock_handle[0].cookie);
2323                 rc = ldlm_callback_reply(req, -EINVAL);
2324                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2325                                      &dlm_req->lock_handle[0]);
2326                 RETURN(0);
2327         }
2328
2329         if (ldlm_is_fail_loc(lock) &&
2330             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2331                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2332
2333         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2334         lock_res_and_lock(lock);
2335         lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
2336                                               LDLM_FL_AST_MASK);
2337         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2338                 /*
2339                  * If somebody cancels the lock and the cache is already dropped,
2340                  * or the lock failed before the cp_ast was received on the client,
2341                  * we can tell the server we have no lock. Otherwise, we
2342                  * should send the cancel after dropping the cache.
2343                  */
2344                 if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
2345                      ldlm_is_failed(lock)) {
2346                         LDLM_DEBUG(lock,
2347                                    "callback on lock %llx - lock disappeared",
2348                                    dlm_req->lock_handle[0].cookie);
2349                         unlock_res_and_lock(lock);
2350                         LDLM_LOCK_RELEASE(lock);
2351                         rc = ldlm_callback_reply(req, -EINVAL);
2352                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2353                                              &dlm_req->lock_handle[0]);
2354                         RETURN(0);
2355                 }
2356                 /*
2357                  * BL_AST locks are not needed in LRU.
2358                  * Let ldlm_cancel_lru() be fast.
2359                  */
2360                 ldlm_lock_remove_from_lru(lock);
2361                 ldlm_set_bl_ast(lock);
2362         }
2363         unlock_res_and_lock(lock);
2364
2365         /*
2366          * We want the ost thread to get this reply so that it can respond
2367          * to ost requests (write cache writeback) that might be triggered
2368          * in the callback.
2369          *
2370          * But we'd also like to be able to indicate in the reply that we're
2371          * cancelling right now, because it's unused, or have an intent result
2372          * in the reply, so we might have to push the responsibility for sending
2373          * the reply down into the AST handlers, alas.
2374          */
2375
2376         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2377         case LDLM_BL_CALLBACK:
2378                 CDEBUG(D_INODE, "blocking ast\n");
2379                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2380                 if (!ldlm_is_cancel_on_block(lock)) {
2381                         rc = ldlm_callback_reply(req, 0);
2382                         if (req->rq_no_reply || rc)
2383                                 ldlm_callback_errmsg(req, "Normal process", rc,
2384                                                      &dlm_req->lock_handle[0]);
2385                 }
2386                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2387                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2388                 break;
2389         case LDLM_CP_CALLBACK:
2390                 CDEBUG(D_INODE, "completion ast\n");
2391                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2392                 ldlm_callback_reply(req, 0);
2393                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2394                 break;
2395         case LDLM_GL_CALLBACK:
2396                 CDEBUG(D_INODE, "glimpse ast\n");
2397                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2398                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2399                 break;
2400         default:
2401                 LBUG(); /* checked above */
2402         }
2403
2404         RETURN(0);
2405 }
2406
2407 #ifdef HAVE_SERVER_SUPPORT
2408 /**
2409  * Main handler for canceld thread.
2410  *
2411  * Separated into its own thread to avoid deadlocks.
2412  */
2413 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2414 {
2415         int rc;
2416
2417         ENTRY;
2418
2419         /*
2420          * Requests arrive in sender's byte order.  The ptlrpc service
2421          * handler has already checked and, if necessary, byte-swapped the
2422          * incoming request message body, but I am responsible for the
2423          * message buffers.
2424          */
2425
2426         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2427
2428         if (req->rq_export == NULL) {
2429                 struct ldlm_request *dlm_req;
2430
2431                 CERROR("%s from %s arrived at %llu with bad export cookie %llu\n",
2432                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2433                        libcfs_nid2str(req->rq_peer.nid),
2434                        (unsigned long long)req->rq_arrival_time.tv_sec,
2435                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2436
2437                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2438                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2439                         dlm_req = req_capsule_client_get(&req->rq_pill,
2440                                                          &RMF_DLM_REQ);
2441                         if (dlm_req != NULL)
2442                                 ldlm_lock_dump_handle(D_ERROR,
2443                                                       &dlm_req->lock_handle[0]);
2444                 }
2445                 ldlm_callback_reply(req, -ENOTCONN);
2446                 RETURN(0);
2447         }
2448
2449         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2450         /* XXX FIXME move this back to mds/handler.c, b=249 */
2451         case LDLM_CANCEL:
2452                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2453                 CDEBUG(D_INODE, "cancel\n");
2454                 if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_NET) ||
2455                     CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND) ||
2456                     CFS_FAIL_CHECK(OBD_FAIL_LDLM_BL_EVICT))
2457                         RETURN(0);
2458                 rc = ldlm_handle_cancel(req);
2459                 break;
2460         case LDLM_CONVERT:
2461         {
2462                 struct ldlm_request *dlm_req;
2463
2464                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
2465                 CDEBUG(D_INODE, "convert\n");
2466
2467                 dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2468                 if (dlm_req == NULL) {
2469                         CDEBUG(D_INFO, "bad request buffer for convert\n");
2470                         rc = ldlm_callback_reply(req, -EPROTO);
2471                 } else {
2472                         req->rq_status = ldlm_handle_convert0(req, dlm_req);
2473                         rc = ptlrpc_reply(req);
2474                 }
2475                 break;
2476         }
2477         default:
2478                 CERROR("invalid opcode %d\n",
2479                        lustre_msg_get_opc(req->rq_reqmsg));
2480                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2481                 rc = ldlm_callback_reply(req, -EINVAL);
2482         }
2483
2484         RETURN(rc);
2485 }
2486
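     /*
      * High-priority request hook: return 1 if the cancel request \a req
      * carries the handle of \a lock, so the request priority can be raised.
      */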
2487 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2488                                         struct ldlm_lock *lock)
2489 {
2490         struct ldlm_request *dlm_req;
2491         struct lustre_handle lockh;
2492         int rc = 0;
2493         int i;
2494
2495         ENTRY;
2496
2497         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2498         if (dlm_req == NULL)
2499                 RETURN(0);
2500
2501         ldlm_lock2handle(lock, &lockh);
2502         for (i = 0; i < dlm_req->lock_count; i++) {
2503                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2504                                         &lockh)) {
2505                         DEBUG_REQ(D_RPCTRACE, req,
2506                                   "Prio raised by lock %#llx", lockh.cookie);
2507                         rc = 1;
2508                         break;
2509                 }
2510         }
2511
2512         RETURN(rc);
2513 }
2514
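     /*
      * High-priority request hook: return 1 if any lock referenced by this
      * cancel/convert request already has a blocking AST sent, so the
      * request deserves high-priority handling.
      */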
2515 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2516 {
2517         struct ldlm_request *dlm_req;
2518         int rc = 0;
2519         int i;
2520         unsigned int size;
2521
2522         ENTRY;
2523
2524         /* no prolong in recovery */
2525         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2526                 RETURN(0);
2527
2528         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2529         if (dlm_req == NULL)
2530                 RETURN(-EFAULT);
2531
2532         size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT);
2533         if (size <= offsetof(struct ldlm_request, lock_handle) ||
2534             (size - offsetof(struct ldlm_request, lock_handle)) /
2535              sizeof(struct lustre_handle) < dlm_req->lock_count)
2536                 RETURN(-EPROTO);
2537
2538         for (i = 0; i < dlm_req->lock_count; i++) {
2539                 struct ldlm_lock *lock;
2540
2541                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2542                 if (lock == NULL)
2543                         continue;
2544
2545                 rc = ldlm_is_ast_sent(lock) ? 1 : 0;
2546                 if (rc)
2547                         LDLM_DEBUG(lock, "hpreq cancel/convert lock");
2548                 LDLM_LOCK_PUT(lock);
2549
2550                 if (rc)
2551                         break;
2552         }
2553
2554         RETURN(rc);
2555 }
2556
2557 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2558         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2559         .hpreq_check      = ldlm_cancel_hpreq_check,
2560         .hpreq_fini       = NULL,
2561 };
2562
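     /*
      * Attach ldlm_cancel_hpreq_ops to incoming LDLM_CANCEL and LDLM_CONVERT
      * requests so they can be promoted to high-priority handling.
      */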
2563 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2564 {
2565         ENTRY;
2566
2567         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2568
2569         if (req->rq_export == NULL)
2570                 RETURN(0);
2571
2572         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2573                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2574                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2575         } else if (LDLM_CONVERT == lustre_msg_get_opc(req->rq_reqmsg)) {
2576                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
2577                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2578         }
2579         RETURN(0);
2580 }
2581
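     /*
      * Hash iterator callback: collect granted IBITS/PLAIN locks that have
      * not yet had a blocking AST sent onto the rpc_list passed in \a data,
      * marking them AST_SENT and removing them from the export lock hash.
      */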
2582 static int ldlm_revoke_lock_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2583                                struct hlist_node *hnode, void *data)
2584
2585 {
2586         struct list_head *rpc_list = data;
2587         struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
2588
2589         lock_res_and_lock(lock);
2590
2591         if (!ldlm_is_granted(lock)) {
2592                 unlock_res_and_lock(lock);
2593                 return 0;
2594         }
2595
2596         LASSERT(lock->l_resource);
2597         if (lock->l_resource->lr_type != LDLM_IBITS &&
2598             lock->l_resource->lr_type != LDLM_PLAIN) {
2599                 unlock_res_and_lock(lock);
2600                 return 0;
2601         }
2602
2603         if (ldlm_is_ast_sent(lock)) {
2604                 unlock_res_and_lock(lock);
2605                 return 0;
2606         }
2607
2608         LASSERT(lock->l_blocking_ast);
2609         LASSERT(!lock->l_blocking_lock);
2610
2611         ldlm_set_ast_sent(lock);
2612         if (lock->l_export && lock->l_export->exp_lock_hash) {
2613                 /*
2614                  * NB: it's safe to call cfs_hash_del() even if the lock isn't
2615                  * in exp_lock_hash.
2616                  */
2617                 /*
2618                  * In the function below, .hs_keycmp resolves to
2619                  * ldlm_export_lock_keycmp()
2620                  */
2621                 /* coverity[overrun-buffer-val] */
2622                 cfs_hash_del(lock->l_export->exp_lock_hash,
2623                              &lock->l_remote_handle, &lock->l_exp_hash);
2624         }
2625
2626         list_add_tail(&lock->l_rk_ast, rpc_list);
2627         LDLM_LOCK_GET(lock);
2628
2629         unlock_res_and_lock(lock);
2630         return 0;
2631 }
2632
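     /* Send blocking ASTs for all granted IBITS/PLAIN locks of the given export. */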
2633 void ldlm_revoke_export_locks(struct obd_export *exp)
2634 {
2635         struct list_head rpc_list;
2636
2637         ENTRY;
2638
2639         INIT_LIST_HEAD(&rpc_list);
2640         cfs_hash_for_each_nolock(exp->exp_lock_hash,
2641                                  ldlm_revoke_lock_cb, &rpc_list, 0);
2642         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2643                           LDLM_WORK_REVOKE_AST);
2644
2645         EXIT;
2646 }
2647 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2648 #endif /* HAVE_SERVER_SUPPORT */
2649
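     /*
      * Fetch the next unit of work for a blocking thread: either a stale
      * export to clean up or a blocking work item.  Returns 1 if there is
      * work to do, 0 otherwise.
      */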
2650 static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
2651                             struct ldlm_bl_work_item **p_blwi,
2652                             struct obd_export **p_exp)
2653 {
2654         struct ldlm_bl_work_item *blwi = NULL;
2655         static unsigned int num_bl;
2656         static unsigned int num_stale;
2657         int num_th = atomic_read(&blp->blp_num_threads);
2658
2659         *p_exp = obd_stale_export_get();
2660
2661         spin_lock(&blp->blp_lock);
2662         if (*p_exp != NULL) {
2663                 if (num_th == 1 || ++num_stale < num_th) {
2664                         spin_unlock(&blp->blp_lock);
2665                         return 1;
2666                 }
2667                 num_stale = 0;
2668         }
2669
2670         /* process a request from blp_list at least once per blp_num_threads dequeues */
2671         if (!list_empty(&blp->blp_list) &&
2672             (list_empty(&blp->blp_prio_list) || num_bl == 0))
2673                 blwi = list_entry(blp->blp_list.next,
2674                                   struct ldlm_bl_work_item, blwi_entry);
2675         else
2676                 if (!list_empty(&blp->blp_prio_list))
2677                         blwi = list_entry(blp->blp_prio_list.next,
2678                                           struct ldlm_bl_work_item,
2679                                           blwi_entry);
2680
2681         if (blwi) {
2682                 if (++num_bl >= num_th)
2683                         num_bl = 0;
2684                 list_del(&blwi->blwi_entry);
2685         }
2686         spin_unlock(&blp->blp_lock);
2687         *p_blwi = blwi;
2688
2689         if (*p_exp != NULL && *p_blwi != NULL) {
2690                 obd_stale_export_put(*p_exp);
2691                 *p_exp = NULL;
2692         }
2693
2694         return (*p_blwi != NULL || *p_exp != NULL) ? 1 : 0;
2695 }
2696
2697 /* This only contains temporary data until the thread starts */
2698 struct ldlm_bl_thread_data {
2699         struct ldlm_bl_pool     *bltd_blp;
2700         struct completion       bltd_comp;
2701         int                     bltd_num;
2702 };
2703
2704 static int ldlm_bl_thread_main(void *arg);
2705
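     /*
      * Try to start one more blocking thread.  Nothing is started if the
      * maximum thread count is already reached or, when \a check_busy is
      * set, if some of the existing threads are still idle.
      */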
2706 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp, bool check_busy)
2707 {
2708         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2709         struct task_struct *task;
2710
2711         init_completion(&bltd.bltd_comp);
2712
2713         bltd.bltd_num = atomic_inc_return(&blp->blp_num_threads);
2714         if (bltd.bltd_num >= blp->blp_max_threads) {
2715                 atomic_dec(&blp->blp_num_threads);
2716                 return 0;
2717         }
2718
2719         LASSERTF(bltd.bltd_num > 0, "thread num:%d\n", bltd.bltd_num);
2720         if (check_busy &&
2721             atomic_read(&blp->blp_busy_threads) < (bltd.bltd_num - 1)) {
2722                 atomic_dec(&blp->blp_num_threads);
2723                 return 0;
2724         }
2725
2726         task = kthread_run(ldlm_bl_thread_main, &bltd, "ldlm_bl_%02d",
2727                            bltd.bltd_num);
2728         if (IS_ERR(task)) {
2729                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
2730                        bltd.bltd_num, PTR_ERR(task));
2731                 atomic_dec(&blp->blp_num_threads);
2732                 return PTR_ERR(task);
2733         }
2734         wait_for_completion(&bltd.bltd_comp);
2735
2736         return 0;
2737 }
2738
2739 /* Not fatal if this is racy and we end up with a few too many threads */
2740 static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp,
2741                                       struct ldlm_bl_work_item *blwi)
2742 {
2743         if (atomic_read(&blp->blp_num_threads) >= blp->blp_max_threads)
2744                 return 0;
2745
2746         if (atomic_read(&blp->blp_busy_threads) <
2747             atomic_read(&blp->blp_num_threads))
2748                 return 0;
2749
2750         if (blwi != NULL && (blwi->blwi_ns == NULL ||
2751                              blwi->blwi_mem_pressure))
2752                 return 0;
2753
2754         return 1;
2755 }
2756
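/*
 * Process a single blocking-callback work item: either cancel the
 * batch of LRU locks attached to it, or run the blocking callback
 * for the one lock it carries. Returns LDLM_ITER_STOP for the empty
 * (blwi_ns == NULL) item queued by ldlm_cleanup() to stop the thread.
 */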
2757 static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
2758                                struct ldlm_bl_work_item *blwi)
2759 {
2760         ENTRY;
2761
2762         if (blwi->blwi_ns == NULL)
2763                 /* added by ldlm_cleanup() */
2764                 RETURN(LDLM_ITER_STOP);
2765
2766         if (blwi->blwi_mem_pressure)
2767                 memory_pressure_set();
2768
2769         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
2770
2771         if (blwi->blwi_count) {
2772                 int count;
2773                 /*
2774                  * Special case: when LRU locks are canceled
2775                  * asynchronously, the list of locks is passed in here.
2776                  * They are already marked LDLM_FL_CANCELING, but NOT
2777                  * yet canceled locally.
2778                  */
2779                 count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2780                                                    blwi->blwi_count,
2781                                                    LCF_BL_AST);
2782                 ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
2783                                      blwi->blwi_flags);
2784         } else {
2785                 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2786                                         blwi->blwi_lock);
2787         }
2788         if (blwi->blwi_mem_pressure)
2789                 memory_pressure_clr();
2790
2791         if (blwi->blwi_flags & LCF_ASYNC)
2792                 OBD_FREE(blwi, sizeof(*blwi));
2793         else
2794                 complete(&blwi->blwi_comp);
2795
2796         RETURN(0);
2797 }
2798
2799 /**
2800  * Cancel stale locks on an export. Cancel blocked locks first.
2801  * If the given export has blocked locks, the next export in the list may
2802  * have them too, so cancel non-blocked locks only when the current export
2803  * has no blocked locks.
2804  */
2805 static int ldlm_bl_thread_exports(struct ldlm_bl_pool *blp,
2806                                   struct obd_export *exp)
2807 {
2808         int num;
2809
2810         ENTRY;
2811
2812         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_BL_EVICT, 4);
2813
2814         num = ldlm_export_cancel_blocked_locks(exp);
2815         if (num == 0)
2816                 ldlm_export_cancel_locks(exp);
2817
2818         obd_stale_export_put(exp);
2819
2820         RETURN(0);
2821 }
2822
2823
2824 /**
2825  * Main blocking requests processing thread.
2826  *
2827  * Callers put locks into its queue by calling ldlm_bl_to_thread.
2828  * This thread ultimately makes the actual call to ->l_blocking_ast
2829  * for each queued lock.
2830  */
2831 static int ldlm_bl_thread_main(void *arg)
2832 {
2833         struct lu_env *env;
2834         struct ldlm_bl_pool *blp;
2835         struct ldlm_bl_thread_data *bltd = arg;
2836         int rc;
2837
2838         ENTRY;
2839
2840         OBD_ALLOC_PTR(env);
2841         if (!env)
2842                 RETURN(-ENOMEM);
2843         rc = lu_env_init(env, LCT_DT_THREAD);
2844         if (rc)
2845                 GOTO(out_env, rc);
2846         rc = lu_env_add(env);
2847         if (rc)
2848                 GOTO(out_env_fini, rc);
2849
2850         blp = bltd->bltd_blp;
2851
2852         complete(&bltd->bltd_comp);
2853         /* cannot use bltd after this, it is only on caller's stack */
2854
2855         while (1) {
2856                 struct ldlm_bl_work_item *blwi = NULL;
2857                 struct obd_export *exp = NULL;
2858                 int rc;
2859
2860                 rc = ldlm_bl_get_work(blp, &blwi, &exp);
2861
2862                 if (rc == 0)
2863                         wait_event_idle_exclusive(blp->blp_waitq,
2864                                                   ldlm_bl_get_work(blp, &blwi,
2865                                                                    &exp));
2866                 atomic_inc(&blp->blp_busy_threads);
2867
2868                 if (ldlm_bl_thread_need_create(blp, blwi))
2869                         /* discard the return value, we tried */
2870                         ldlm_bl_thread_start(blp, true);
2871
2872                 if (exp)
2873                         rc = ldlm_bl_thread_exports(blp, exp);
2874                 else if (blwi)
2875                         rc = ldlm_bl_thread_blwi(blp, blwi);
2876
2877                 atomic_dec(&blp->blp_busy_threads);
2878
2879                 if (rc == LDLM_ITER_STOP)
2880                         break;
2881
2882                 /*
2883                  * If there are many namespaces, we will not sleep waiting for
2884                  * work, and must do a cond_resched to avoid holding the CPU
2885                  * for too long.
2886                  */
2887                 cond_resched();
2888         }
2889
2890         atomic_dec(&blp->blp_num_threads);
2891         complete(&blp->blp_comp);
2892
2893         lu_env_remove(env);
2894 out_env_fini:
2895         lu_env_fini(env);
2896 out_env:
2897         OBD_FREE_PTR(env);
2898         RETURN(rc);
2899 }
2900
2901
2902 static int ldlm_setup(void);
2903 static int ldlm_cleanup(void);
2904
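/*
 * Take a reference on the global LDLM state; the first reference
 * brings up services and threads via ldlm_setup().
 */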
2905 int ldlm_get_ref(void)
2906 {
2907         int rc = 0;
2908
2909         ENTRY;
2910         mutex_lock(&ldlm_ref_mutex);
2911         if (++ldlm_refcount == 1) {
2912                 rc = ldlm_setup();
2913                 if (rc)
2914                         ldlm_refcount--;
2915         }
2916         mutex_unlock(&ldlm_ref_mutex);
2917
2918         RETURN(rc);
2919 }
2920
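/*
 * Drop a reference on the global LDLM state; the last reference runs
 * ldlm_cleanup(). The reference is kept if cleanup fails.
 */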
2921 void ldlm_put_ref(void)
2922 {
2923         ENTRY;
2924         mutex_lock(&ldlm_ref_mutex);
2925         if (ldlm_refcount == 1) {
2926                 int rc = ldlm_cleanup();
2927
2928                 if (rc)
2929                         CERROR("ldlm_cleanup failed: %d\n", rc);
2930                 else
2931                         ldlm_refcount--;
2932         } else {
2933                 ldlm_refcount--;
2934         }
2935         mutex_unlock(&ldlm_ref_mutex);
2936
2937         EXIT;
2938 }
2939
2940 /*
2941  * Export handle<->lock hash operations.
2942  */
2943 static unsigned
2944 ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
2945 {
2946         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2947 }
2948
2949 static void *
2950 ldlm_export_lock_key(struct hlist_node *hnode)
2951 {
2952         struct ldlm_lock *lock;
2953
2954         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2955         return &lock->l_remote_handle;
2956 }
2957
2958 static void
2959 ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
2960 {
2961         struct ldlm_lock     *lock;
2962
2963         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2964         lock->l_remote_handle = *(struct lustre_handle *)key;
2965 }
2966
2967 static int
2968 ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
2969 {
2970         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2971 }
2972
2973 static void *
2974 ldlm_export_lock_object(struct hlist_node *hnode)
2975 {
2976         return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2977 }
2978
2979 static void
2980 ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
2981 {
2982         struct ldlm_lock *lock;
2983
2984         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2985         LDLM_LOCK_GET(lock);
2986 }
2987
2988 static void
2989 ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
2990 {
2991         struct ldlm_lock *lock;
2992
2993         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2994         LDLM_LOCK_RELEASE(lock);
2995 }
2996
2997 static struct cfs_hash_ops ldlm_export_lock_ops = {
2998         .hs_hash        = ldlm_export_lock_hash,
2999         .hs_key         = ldlm_export_lock_key,
3000         .hs_keycmp      = ldlm_export_lock_keycmp,
3001         .hs_keycpy      = ldlm_export_lock_keycpy,
3002         .hs_object      = ldlm_export_lock_object,
3003         .hs_get         = ldlm_export_lock_get,
3004         .hs_put         = ldlm_export_lock_put,
3005         .hs_put_locked  = ldlm_export_lock_put,
3006 };
3007
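/*
 * Set up per-export LDLM state: the exp_lock_hash table keyed by the
 * peer's lock handle (l_remote_handle), plus flock-specific state.
 */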
3008 int ldlm_init_export(struct obd_export *exp)
3009 {
3010         int rc;
3011
3012         ENTRY;
3013
3014         exp->exp_lock_hash =
3015                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
3016                                 HASH_EXP_LOCK_CUR_BITS,
3017                                 HASH_EXP_LOCK_MAX_BITS,
3018                                 HASH_EXP_LOCK_BKT_BITS, 0,
3019                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
3020                                 &ldlm_export_lock_ops,
3021                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
3022                                 CFS_HASH_NBLK_CHANGE);
3023
3024         if (!exp->exp_lock_hash)
3025                 RETURN(-ENOMEM);
3026
3027         rc = ldlm_init_flock_export(exp);
3028         if (rc)
3029                 GOTO(err, rc);
3030
3031         RETURN(0);
3032 err:
3033         ldlm_destroy_export(exp);
3034         RETURN(rc);
3035 }
3036 EXPORT_SYMBOL(ldlm_init_export);
3037
3038 void ldlm_destroy_export(struct obd_export *exp)
3039 {
3040         ENTRY;
3041         cfs_hash_putref(exp->exp_lock_hash);
3042         exp->exp_lock_hash = NULL;
3043
3044         ldlm_destroy_flock_export(exp);
3045         EXIT;
3046 }
3047 EXPORT_SYMBOL(ldlm_destroy_export);
3048
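/*
 * Sysfs tunable: whether unused locks are canceled before lock replay
 * during recovery (ldlm/cancel_unused_locks_before_replay).
 */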
3049 static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
3050                                                       struct attribute *attr,
3051                                                       char *buf)
3052 {
3053         return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
3054 }
3055
3056 static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
3057                                                        struct attribute *attr,
3058                                                        const char *buffer,
3059                                                        size_t count)
3060 {
3061         int rc;
3062         unsigned long val;
3063
3064         rc = kstrtoul(buffer, 10, &val);
3065         if (rc)
3066                 return rc;
3067
3068         ldlm_cancel_unused_locks_before_replay = val;
3069
3070         return count;
3071 }
3072 LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
3073
3074 static struct attribute *ldlm_attrs[] = {
3075         &lustre_attr_cancel_unused_locks_before_replay.attr,
3076         NULL,
3077 };
3078
3079 static struct attribute_group ldlm_attr_group = {
3080         .attrs = ldlm_attrs,
3081 };
3082
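/*
 * Bring up the global LDLM state: sysfs and debugfs entries, the
 * "ldlm_cbd" callback service, the "ldlm_canceld" cancel service
 * (with HAVE_SERVER_SUPPORT), the blocking-callback thread pool, the
 * expired-lock thread, the lock pools and lock reclaim. Any failure
 * unwinds through ldlm_cleanup().
 */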
3083 static int ldlm_setup(void)
3084 {
3085         static struct ptlrpc_service_conf       conf;
3086         struct ldlm_bl_pool                    *blp = NULL;
3087 #ifdef HAVE_SERVER_SUPPORT
3088         struct task_struct *task;
3089 #endif /* HAVE_SERVER_SUPPORT */
3090         int i;
3091         int rc = 0;
3092
3093         ENTRY;
3094
3095         if (ldlm_state != NULL)
3096                 RETURN(-EALREADY);
3097
3098         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
3099         if (ldlm_state == NULL)
3100                 RETURN(-ENOMEM);
3101
3102         ldlm_kobj = kobject_create_and_add("ldlm", &lustre_kset->kobj);
3103         if (!ldlm_kobj)
3104                 GOTO(out, rc = -ENOMEM);
3105
3106         rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
3107         if (rc)
3108                 GOTO(out, rc);
3109
3110         ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
3111         if (!ldlm_ns_kset)
3112                 GOTO(out, rc = -ENOMEM);
3113
3114         ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
3115         if (!ldlm_svc_kset)
3116                 GOTO(out, rc = -ENOMEM);
3117
3118         rc = ldlm_debugfs_setup();
3119         if (rc != 0)
3120                 GOTO(out, rc);
3121
3122         memset(&conf, 0, sizeof(conf));
3123         conf = (typeof(conf)) {
3124                 .psc_name               = "ldlm_cbd",
3125                 .psc_watchdog_factor    = 2,
3126                 .psc_buf                = {
3127                         .bc_nbufs               = LDLM_CLIENT_NBUFS,
3128                         .bc_buf_size            = LDLM_BUFSIZE,
3129                         .bc_req_max_size        = LDLM_MAXREQSIZE,
3130                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
3131                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
3132                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
3133                 },
3134                 .psc_thr                = {
3135                         .tc_thr_name            = "ldlm_cb",
3136                         .tc_thr_factor          = LDLM_THR_FACTOR,
3137                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
3138                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
3139                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
3140                         .tc_nthrs_user          = ldlm_num_threads,
3141                         .tc_cpu_bind            = ldlm_cpu_bind,
3142                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
3143                 },
3144                 .psc_cpt                = {
3145                         .cc_pattern             = ldlm_cpts,
3146                         .cc_affinity            = true,
3147                 },
3148                 .psc_ops                = {
3149                         .so_req_handler         = ldlm_callback_handler,
3150                 },
3151         };
3152         ldlm_state->ldlm_cb_service =
3153                         ptlrpc_register_service(&conf, ldlm_svc_kset,
3154                                                 ldlm_svc_debugfs_dir);
3155         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
3156                 CERROR("failed to start service\n");
3157                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
3158                 ldlm_state->ldlm_cb_service = NULL;
3159                 GOTO(out, rc);
3160         }
3161
3162 #ifdef HAVE_SERVER_SUPPORT
3163         memset(&conf, 0, sizeof(conf));
3164         conf = (typeof(conf)) {
3165                 .psc_name               = "ldlm_canceld",
3166                 .psc_watchdog_factor    = 6,
3167                 .psc_buf                = {
3168                         .bc_nbufs               = LDLM_SERVER_NBUFS,
3169                         .bc_buf_size            = LDLM_BUFSIZE,
3170                         .bc_req_max_size        = LDLM_MAXREQSIZE,
3171                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
3172                         .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
3173                         .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
3174
3175                 },
3176                 .psc_thr                = {
3177                         .tc_thr_name            = "ldlm_cn",
3178                         .tc_thr_factor          = LDLM_THR_FACTOR,
3179                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
3180                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
3181                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
3182                         .tc_nthrs_user          = ldlm_num_threads,
3183                         .tc_cpu_bind            = ldlm_cpu_bind,
3184                         .tc_ctx_tags            = LCT_MD_THREAD |
3185                                                   LCT_DT_THREAD |
3186                                                   LCT_CL_THREAD,
3187                 },
3188                 .psc_cpt                = {
3189                         .cc_pattern             = ldlm_cpts,
3190                         .cc_affinity            = true,
3191                 },
3192                 .psc_ops                = {
3193                         .so_req_handler         = ldlm_cancel_handler,
3194                         .so_hpreq_handler       = ldlm_hpreq_handler,
3195                 },
3196         };
3197         ldlm_state->ldlm_cancel_service =
3198                         ptlrpc_register_service(&conf, ldlm_svc_kset,
3199                                                 ldlm_svc_debugfs_dir);
3200         if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
3201                 CERROR("failed to start service\n");
3202                 rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
3203                 ldlm_state->ldlm_cancel_service = NULL;
3204                 GOTO(out, rc);
3205         }
3206 #endif /* HAVE_SERVER_SUPPORT */
3207
3208         OBD_ALLOC(blp, sizeof(*blp));
3209         if (blp == NULL)
3210                 GOTO(out, rc = -ENOMEM);
3211         ldlm_state->ldlm_bl_pool = blp;
3212
3213         spin_lock_init(&blp->blp_lock);
3214         INIT_LIST_HEAD(&blp->blp_list);
3215         INIT_LIST_HEAD(&blp->blp_prio_list);
3216         init_waitqueue_head(&blp->blp_waitq);
3217         atomic_set(&blp->blp_num_threads, 0);
3218         atomic_set(&blp->blp_busy_threads, 0);
3219
3220         if (ldlm_num_threads == 0) {
3221                 blp->blp_min_threads = LDLM_NTHRS_INIT;
3222                 blp->blp_max_threads = LDLM_NTHRS_MAX;
3223         } else {
3224                 blp->blp_min_threads = blp->blp_max_threads =
3225                         min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
3226                                                          ldlm_num_threads));
3227         }
3228
3229         for (i = 0; i < blp->blp_min_threads; i++) {
3230                 rc = ldlm_bl_thread_start(blp, false);
3231                 if (rc < 0)
3232                         GOTO(out, rc);
3233         }
3234
3235 #ifdef HAVE_SERVER_SUPPORT
3236         task = kthread_run(expired_lock_main, NULL, "ldlm_elt");
3237         if (IS_ERR(task)) {
3238                 rc = PTR_ERR(task);
3239                 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
3240                 GOTO(out, rc);
3241         }
3242
3243         wait_event(expired_lock_wait_queue,
3244                    expired_lock_thread_state == ELT_READY);
3245 #endif /* HAVE_SERVER_SUPPORT */
3246
3247         rc = ldlm_pools_init();
3248         if (rc) {
3249                 CERROR("Failed to initialize LDLM pools: %d\n", rc);
3250                 GOTO(out, rc);
3251         }
3252
3253         rc = ldlm_reclaim_setup();
3254         if (rc) {
3255                 CERROR("Failed to setup reclaim thread: rc = %d\n", rc);
3256                 GOTO(out, rc);
3257         }
3258         RETURN(0);
3259
3260  out:
3261         ldlm_cleanup();
3262         RETURN(rc);
3263 }
3264
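/*
 * Tear down everything set up by ldlm_setup(). Fails with -EBUSY if
 * any namespaces still exist. Blocking-callback threads are stopped
 * by queueing one empty (blwi_ns == NULL) work item per thread.
 */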
3265 static int ldlm_cleanup(void)
3266 {
3267         ENTRY;
3268
3269         if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
3270             !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
3271                 CERROR("ldlm still has namespaces; clean these up first.\n");
3272                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
3273                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
3274                 RETURN(-EBUSY);
3275         }
3276
3277         ldlm_reclaim_cleanup();
3278         ldlm_pools_fini();
3279
3280         if (ldlm_state->ldlm_bl_pool != NULL) {
3281                 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
3282
3283                 while (atomic_read(&blp->blp_num_threads) > 0) {
3284                         struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
3285
3286                         init_completion(&blp->blp_comp);
3287
3288                         spin_lock(&blp->blp_lock);
3289                         list_add_tail(&blwi.blwi_entry, &blp->blp_list);
3290                         wake_up(&blp->blp_waitq);
3291                         spin_unlock(&blp->blp_lock);
3292
3293                         wait_for_completion(&blp->blp_comp);
3294                 }
3295
3296                 OBD_FREE(blp, sizeof(*blp));
3297         }
3298
3299         if (ldlm_state->ldlm_cb_service != NULL)
3300                 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
3301 #ifdef HAVE_SERVER_SUPPORT
3302         if (ldlm_state->ldlm_cancel_service != NULL)
3303                 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
3304 #endif
3305
3306         if (ldlm_ns_kset)
3307                 kset_unregister(ldlm_ns_kset);
3308         if (ldlm_svc_kset)
3309                 kset_unregister(ldlm_svc_kset);
3310         if (ldlm_kobj) {
3311                 sysfs_remove_group(ldlm_kobj, &ldlm_attr_group);
3312                 kobject_put(ldlm_kobj);
3313         }
3314
3315         ldlm_debugfs_cleanup();
3316
3317 #ifdef HAVE_SERVER_SUPPORT
3318         if (expired_lock_thread_state != ELT_STOPPED) {
3319                 expired_lock_thread_state = ELT_TERMINATE;
3320                 wake_up(&expired_lock_wait_queue);
3321                 wait_event(expired_lock_wait_queue,
3322                            expired_lock_thread_state == ELT_STOPPED);
3323         }
3324 #endif
3325
3326         OBD_FREE(ldlm_state, sizeof(*ldlm_state));
3327         ldlm_state = NULL;
3328
3329         RETURN(0);
3330 }
3331
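/*
 * Module init: create the slab caches for resources, locks, extent
 * interval nodes and trees, and (with HAVE_SERVER_SUPPORT) inodebits
 * nodes and glimpse work items.
 */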
3332 int ldlm_init(void)
3333 {
3334         ldlm_resource_slab = kmem_cache_create("ldlm_resources",
3335                                                sizeof(struct ldlm_resource), 0,
3336                                                SLAB_HWCACHE_ALIGN, NULL);
3337         if (ldlm_resource_slab == NULL)
3338                 return -ENOMEM;
3339
3340         ldlm_lock_slab = kmem_cache_create("ldlm_locks",
3341                               sizeof(struct ldlm_lock), 0,
3342                               SLAB_HWCACHE_ALIGN, NULL);
3343         if (ldlm_lock_slab == NULL)
3344                 goto out_resource;
3345
3346         ldlm_interval_slab = kmem_cache_create("interval_node",
3347                                         sizeof(struct ldlm_interval),
3348                                         0, SLAB_HWCACHE_ALIGN, NULL);
3349         if (ldlm_interval_slab == NULL)
3350                 goto out_lock;
3351
3352         ldlm_interval_tree_slab = kmem_cache_create("interval_tree",
3353                         sizeof(struct ldlm_interval_tree) * LCK_MODE_NUM,
3354                         0, SLAB_HWCACHE_ALIGN, NULL);
3355         if (ldlm_interval_tree_slab == NULL)
3356                 goto out_interval;
3357
3358 #ifdef HAVE_SERVER_SUPPORT
3359         ldlm_inodebits_slab = kmem_cache_create("ldlm_ibits_node",
3360                                                 sizeof(struct ldlm_ibits_node),
3361                                                 0, SLAB_HWCACHE_ALIGN, NULL);
3362         if (ldlm_inodebits_slab == NULL)
3363                 goto out_interval_tree;
3364
3365         ldlm_glimpse_work_kmem = kmem_cache_create("ldlm_glimpse_work_kmem",
3366                                         sizeof(struct ldlm_glimpse_work),
3367                                         0, 0, NULL);
3368         if (ldlm_glimpse_work_kmem == NULL)
3369                 goto out_inodebits;
3370 #endif
3371
3372 #if LUSTRE_TRACKS_LOCK_EXP_REFS
3373         class_export_dump_hook = ldlm_dump_export_locks;
3374 #endif
3375         return 0;
3376 #ifdef HAVE_SERVER_SUPPORT
3377 out_inodebits:
3378         kmem_cache_destroy(ldlm_inodebits_slab);
3379 out_interval_tree:
3380         kmem_cache_destroy(ldlm_interval_tree_slab);
3381 #endif
3382 out_interval:
3383         kmem_cache_destroy(ldlm_interval_slab);
3384 out_lock:
3385         kmem_cache_destroy(ldlm_lock_slab);
3386 out_resource:
3387         kmem_cache_destroy(ldlm_resource_slab);
3388
3389         return -ENOMEM;
3390 }
3391
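/* Module cleanup: destroy the slab caches created by ldlm_init(). */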
3392 void ldlm_exit(void)
3393 {
3394         if (ldlm_refcount)
3395                 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
3396         kmem_cache_destroy(ldlm_resource_slab);
3397         /*
3398          * ldlm_lock_put() uses RCU to call ldlm_lock_free(), so call
3399          * rcu_barrier() to wait for all outstanding RCU callbacks to
3400          * complete and give ldlm_lock_free() a chance to run first.
3401          */
3402         rcu_barrier();
3403         kmem_cache_destroy(ldlm_lock_slab);
3404         kmem_cache_destroy(ldlm_interval_slab);
3405         kmem_cache_destroy(ldlm_interval_tree_slab);
3406 #ifdef HAVE_SERVER_SUPPORT
3407         kmem_cache_destroy(ldlm_inodebits_slab);
3408         kmem_cache_destroy(ldlm_glimpse_work_kmem);
3409 #endif
3410 }