1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_lockd.c
37  *
38  * Author: Peter Braam <braam@clusterfs.com>
39  * Author: Phil Schwan <phil@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43
44 #ifdef __KERNEL__
45 # include <libcfs/libcfs.h>
46 #else
47 # include <liblustre.h>
48 #endif
49
50 #include <lustre_dlm.h>
51 #include <obd_class.h>
52 #include <libcfs/list.h>
53 #include "ldlm_internal.h"
54
55 static int ldlm_num_threads;
56 CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
57                 "number of DLM service threads to start");
58
59 static char *ldlm_cpts;
60 CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
61                 "CPU partitions ldlm threads should run on");
62
63 extern cfs_mem_cache_t *ldlm_resource_slab;
64 extern cfs_mem_cache_t *ldlm_lock_slab;
65 static cfs_mutex_t      ldlm_ref_mutex;
66 static int ldlm_refcount;
67
68 struct ldlm_cb_async_args {
69         struct ldlm_cb_set_arg *ca_set_arg;
70         struct ldlm_lock       *ca_lock;
71 };
72
73 /* LDLM state */
74
75 static struct ldlm_state *ldlm_state;
76
77 inline cfs_time_t round_timeout(cfs_time_t timeout)
78 {
79         return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
80 }
81
82 /* timeout for initial callback (AST) reply (bz10399) */
83 static inline unsigned int ldlm_get_rq_timeout(void)
84 {
85         /* Non-AT value */
86         unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
87
88         return timeout < 1 ? 1 : timeout;
89 }
90
91 #define ELT_STOPPED   0
92 #define ELT_READY     1
93 #define ELT_TERMINATE 2
94
95 struct ldlm_bl_pool {
96         cfs_spinlock_t          blp_lock;
97
98         /*
99          * blp_prio_list is used for callbacks that should be handled
100          * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
101          * see bug 13843
102          */
103         cfs_list_t              blp_prio_list;
104
105         /*
106          * blp_list is used for all other callbacks which are likely
107          * to take longer to process.
108          */
109         cfs_list_t              blp_list;
110
111         cfs_waitq_t             blp_waitq;
112         cfs_completion_t        blp_comp;
113         cfs_atomic_t            blp_num_threads;
114         cfs_atomic_t            blp_busy_threads;
115         int                     blp_min_threads;
116         int                     blp_max_threads;
117 };
118
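/* One unit of blocking/cancel callback work handed to the blocking
 * callback threads: either a single lock (blwi_lock) or a list of
 * locks to cancel (blwi_head/blwi_count), plus a completion for
 * callers that wait for the work to finish. */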
119 struct ldlm_bl_work_item {
120         cfs_list_t              blwi_entry;
121         struct ldlm_namespace  *blwi_ns;
122         struct ldlm_lock_desc   blwi_ld;
123         struct ldlm_lock       *blwi_lock;
124         cfs_list_t              blwi_head;
125         int                     blwi_count;
126         cfs_completion_t        blwi_comp;
127         int                     blwi_mode;
128         int                     blwi_mem_pressure;
129 };
130
131 #if defined(HAVE_SERVER_SUPPORT) && defined(__KERNEL__)
132
133 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
134 static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
135 static cfs_list_t waiting_locks_list;
136 static cfs_timer_t waiting_locks_timer;
137
138 static struct expired_lock_thread {
139         cfs_waitq_t             elt_waitq;
140         int                     elt_state;
141         int                     elt_dump;
142         cfs_list_t              elt_expired_locks;
143 } expired_lock_thread;
144
145 static inline int have_expired_locks(void)
146 {
147         int need_to_run;
148
149         ENTRY;
150         cfs_spin_lock_bh(&waiting_locks_spinlock);
151         need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
152         cfs_spin_unlock_bh(&waiting_locks_spinlock);
153
154         RETURN(need_to_run);
155 }
156
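/* Body of the "ldlm_elt" thread: wait for expired locks, dump the
 * debug log when requested, and fail the exports of clients whose
 * callbacks timed out so that they get evicted. */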
157 static int expired_lock_main(void *arg)
158 {
159         cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
160         struct l_wait_info lwi = { 0 };
161         int do_dump;
162
163         ENTRY;
164         cfs_daemonize("ldlm_elt");
165
166         expired_lock_thread.elt_state = ELT_READY;
167         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
168
169         while (1) {
170                 l_wait_event(expired_lock_thread.elt_waitq,
171                              have_expired_locks() ||
172                              expired_lock_thread.elt_state == ELT_TERMINATE,
173                              &lwi);
174
175                 cfs_spin_lock_bh(&waiting_locks_spinlock);
176                 if (expired_lock_thread.elt_dump) {
177                         struct libcfs_debug_msg_data msgdata = {
178                                 .msg_file = __FILE__,
179                                 .msg_fn = "waiting_locks_callback",
180                                 .msg_line = expired_lock_thread.elt_dump };
181                         cfs_spin_unlock_bh(&waiting_locks_spinlock);
182
183                         /* from waiting_locks_callback, but not in timer */
184                         libcfs_debug_dumplog();
185                         libcfs_run_lbug_upcall(&msgdata);
186
187                         cfs_spin_lock_bh(&waiting_locks_spinlock);
188                         expired_lock_thread.elt_dump = 0;
189                 }
190
191                 do_dump = 0;
192
193                 while (!cfs_list_empty(expired)) {
194                         struct obd_export *export;
195                         struct ldlm_lock *lock;
196
197                         lock = cfs_list_entry(expired->next, struct ldlm_lock,
198                                           l_pending_chain);
199                         if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
200                             (void *)lock >= LP_POISON) {
201                                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
202                                 CERROR("free lock on elt list %p\n", lock);
203                                 LBUG();
204                         }
205                         cfs_list_del_init(&lock->l_pending_chain);
206                         if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
207                             (void *)lock->l_export >= LP_POISON) {
208                                 CERROR("lock with free export on elt list %p\n",
209                                        lock->l_export);
210                                 lock->l_export = NULL;
211                                 LDLM_ERROR(lock, "free export");
212                                 /* release extra ref grabbed by
213                                  * ldlm_add_waiting_lock() or
214                                  * ldlm_failed_ast() */
215                                 LDLM_LOCK_RELEASE(lock);
216                                 continue;
217                         }
218
219                         if (lock->l_destroyed) {
220                                 /* release the lock refcount relayed
221                                  * here by waiting_locks_callback() */
222                                 LDLM_LOCK_RELEASE(lock);
223                                 continue;
224                         }
225                         export = class_export_lock_get(lock->l_export, lock);
226                         cfs_spin_unlock_bh(&waiting_locks_spinlock);
227
228                         do_dump++;
229                         class_fail_export(export);
230                         class_export_lock_put(export, lock);
231
232                         /* release extra ref grabbed by ldlm_add_waiting_lock()
233                          * or ldlm_failed_ast() */
234                         LDLM_LOCK_RELEASE(lock);
235
236                         cfs_spin_lock_bh(&waiting_locks_spinlock);
237                 }
238                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
239
240                 if (do_dump && obd_dump_on_eviction) {
241                         CERROR("dump the log upon eviction\n");
242                         libcfs_debug_dumplog();
243                 }
244
245                 if (expired_lock_thread.elt_state == ELT_TERMINATE)
246                         break;
247         }
248
249         expired_lock_thread.elt_state = ELT_STOPPED;
250         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
251         RETURN(0);
252 }
253
254 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
255 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds);
256
257 /**
258  * Check if there is a request in the export request list
259  * which prevents the lock from being cancelled.
260  */
261 static int ldlm_lock_busy(struct ldlm_lock *lock)
262 {
263         struct ptlrpc_request *req;
264         int match = 0;
265         ENTRY;
266
267         if (lock->l_export == NULL)
268                 return 0;
269
270         cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
271         cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
272                                 rq_exp_list) {
273                 if (req->rq_ops->hpreq_lock_match) {
274                         match = req->rq_ops->hpreq_lock_match(req, lock);
275                         if (match)
276                                 break;
277                 }
278         }
279         cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
280         RETURN(match);
281 }
282
283 /* This is called from within a timer interrupt and cannot schedule */
284 static void waiting_locks_callback(unsigned long unused)
285 {
286         struct ldlm_lock        *lock;
287         int                     need_dump = 0;
288
289         cfs_spin_lock_bh(&waiting_locks_spinlock);
290         while (!cfs_list_empty(&waiting_locks_list)) {
291                 lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
292                                       l_pending_chain);
293                 if (cfs_time_after(lock->l_callback_timeout,
294                                    cfs_time_current()) ||
295                     (lock->l_req_mode == LCK_GROUP))
296                         break;
297
298                 if (ptlrpc_check_suspend()) {
299                         /* There is a case where we talk to one MDS while
300                          * holding a lock from another MDS; if that second
301                          * MDS is being recovered we can easily get here,
302                          * so suspend the timeouts. Bug 6019 */
303
304                         LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
305                                    lock->l_export->exp_client_uuid.uuid,
306                                    lock->l_export->exp_connection->c_remote_uuid.uuid,
307                                    libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
308
309                         cfs_list_del_init(&lock->l_pending_chain);
310                         if (lock->l_destroyed) {
311                                 /* relay the lock refcount decrease to
312                                  * expired lock thread */
313                                 cfs_list_add(&lock->l_pending_chain,
314                                         &expired_lock_thread.elt_expired_locks);
315                         } else {
316                                 __ldlm_add_waiting_lock(lock,
317                                                 ldlm_get_enq_timeout(lock));
318                         }
319                         continue;
320                 }
321
322                 /* if the timeout overlaps the activation time of suspended timeouts
323                  * then extend it to give the client a chance to reconnect */
324                 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
325                                                  cfs_time_seconds(obd_timeout)/2),
326                                     ptlrpc_suspend_wakeup_time())) {
327                         LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
328                                    lock->l_export->exp_client_uuid.uuid,
329                                    lock->l_export->exp_connection->c_remote_uuid.uuid,
330                                    libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
331
332                         cfs_list_del_init(&lock->l_pending_chain);
333                         if (lock->l_destroyed) {
334                                 /* relay the lock refcount decrease to
335                                  * expired lock thread */
336                                 cfs_list_add(&lock->l_pending_chain,
337                                         &expired_lock_thread.elt_expired_locks);
338                         } else {
339                                 __ldlm_add_waiting_lock(lock,
340                                                 ldlm_get_enq_timeout(lock));
341                         }
342                         continue;
343                 }
344
345                 /* Check if we need to prolong timeout */
346                 if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
347                     ldlm_lock_busy(lock)) {
348                         int cont = 1;
349
350                         if (lock->l_pending_chain.next == &waiting_locks_list)
351                                 cont = 0;
352
353                         LDLM_LOCK_GET(lock);
354
355                         cfs_spin_unlock_bh(&waiting_locks_spinlock);
356                         LDLM_DEBUG(lock, "prolong the busy lock");
357                         ldlm_refresh_waiting_lock(lock,
358                                                   ldlm_get_enq_timeout(lock));
359                         cfs_spin_lock_bh(&waiting_locks_spinlock);
360
361                         if (!cont) {
362                                 LDLM_LOCK_RELEASE(lock);
363                                 break;
364                         }
365
366                         LDLM_LOCK_RELEASE(lock);
367                         continue;
368                 }
369                 ldlm_lock_to_ns(lock)->ns_timeouts++;
370                 LDLM_ERROR(lock, "lock callback timer expired after %lds: "
371                            "evicting client at %s ",
372                            cfs_time_current_sec() - lock->l_last_activity,
373                            libcfs_nid2str(
374                                    lock->l_export->exp_connection->c_peer.nid));
375
376                 /* no need to take an extra ref on the lock since it was in
377                  * the waiting_locks_list and ldlm_add_waiting_lock()
378                  * already grabbed a ref */
379                 cfs_list_del(&lock->l_pending_chain);
380                 cfs_list_add(&lock->l_pending_chain,
381                              &expired_lock_thread.elt_expired_locks);
382                 need_dump = 1;
383         }
384
385         if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
386                 if (obd_dump_on_timeout && need_dump)
387                         expired_lock_thread.elt_dump = __LINE__;
388
389                 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
390         }
391
392         /*
393          * Make sure the timer will fire again if we have any locks
394          * left.
395          */
396         if (!cfs_list_empty(&waiting_locks_list)) {
397                 cfs_time_t timeout_rounded;
398                 lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
399                                       l_pending_chain);
400                 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
401                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
402         }
403         cfs_spin_unlock_bh(&waiting_locks_spinlock);
404 }
405
406 /*
407  * Indicate that we're waiting for a client to call us back cancelling a given
408  * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
409  * timer to fire appropriately.  (We round up to the next second, to avoid
410  * floods of timer firings during periods of high lock contention and traffic).
411  * As done by ldlm_add_waiting_lock(), the caller must grab a lock reference
412  * if it has been added to the waiting list (1 is returned).
413  *
414  * Called with the namespace lock held.
415  */
416 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
417 {
418         cfs_time_t timeout;
419         cfs_time_t timeout_rounded;
420
421         if (!cfs_list_empty(&lock->l_pending_chain))
422                 return 0;
423
424         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
425             OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
426                 seconds = 1;
427
428         timeout = cfs_time_shift(seconds);
429         if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
430                 lock->l_callback_timeout = timeout;
431
432         timeout_rounded = round_timeout(lock->l_callback_timeout);
433
434         if (cfs_time_before(timeout_rounded,
435                             cfs_timer_deadline(&waiting_locks_timer)) ||
436             !cfs_timer_is_armed(&waiting_locks_timer)) {
437                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
438         }
439         /* if the new lock has a shorter timeout than something earlier on
440            the list, we'll wait the longer amount of time; no big deal. */
441         /* FIFO */
442         cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
443         return 1;
444 }
445
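/* Add a lock to the waiting list using the per-lock enqueue timeout,
 * taking an extra reference on it, and link it into the export's
 * blocking-lock list.  Must be called with the resource locked. */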
446 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
447 {
448         int ret;
449         int timeout = ldlm_get_enq_timeout(lock);
450
451         /* NB: must be called while holding lock_res_and_lock() */
452         LASSERT(lock->l_res_locked);
453         lock->l_waited = 1;
454
455         LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
456
457         cfs_spin_lock_bh(&waiting_locks_spinlock);
458         if (lock->l_destroyed) {
459                 static cfs_time_t next;
460                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
461                 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
462                 if (cfs_time_after(cfs_time_current(), next)) {
463                         next = cfs_time_shift(14400);
464                         libcfs_debug_dumpstack(NULL);
465                 }
466                 return 0;
467         }
468
469         ret = __ldlm_add_waiting_lock(lock, timeout);
470         if (ret) {
471                 /* grab ref on the lock if it has been added to the
472                  * waiting list */
473                 LDLM_LOCK_GET(lock);
474         }
475         cfs_spin_unlock_bh(&waiting_locks_spinlock);
476
477         if (ret) {
478                 cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
479                 if (cfs_list_empty(&lock->l_exp_list))
480                         cfs_list_add(&lock->l_exp_list,
481                                      &lock->l_export->exp_bl_list);
482                 cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
483         }
484
485         LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
486                    ret == 0 ? "not re-" : "", timeout,
487                    AT_OFF ? "off" : "on");
488         return ret;
489 }
490
491 /*
492  * Remove a lock from the pending list, likely because it had its cancellation
493  * callback arrive without incident.  This adjusts the lock-timeout timer if
494  * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
495  * As done by ldlm_del_waiting_lock(), the caller must release the lock
496  * reference when the lock is removed from any list (1 is returned).
497  *
498  * Called with namespace lock held.
499  */
500 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
501 {
502         cfs_list_t *list_next;
503
504         if (cfs_list_empty(&lock->l_pending_chain))
505                 return 0;
506
507         list_next = lock->l_pending_chain.next;
508         if (lock->l_pending_chain.prev == &waiting_locks_list) {
509                 /* Removing the head of the list, adjust timer. */
510                 if (list_next == &waiting_locks_list) {
511                         /* No more, just cancel. */
512                         cfs_timer_disarm(&waiting_locks_timer);
513                 } else {
514                         struct ldlm_lock *next;
515                         next = cfs_list_entry(list_next, struct ldlm_lock,
516                                               l_pending_chain);
517                         cfs_timer_arm(&waiting_locks_timer,
518                                       round_timeout(next->l_callback_timeout));
519                 }
520         }
521         cfs_list_del_init(&lock->l_pending_chain);
522
523         return 1;
524 }
525
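/* Remove a lock from the waiting list: unlink it from the export's
 * blocking-lock list and drop the reference taken when the lock was
 * added to the list.  Safe to call on client-side locks (no-op). */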
526 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
527 {
528         int ret;
529
530         if (lock->l_export == NULL) {
531                 /* We don't have a "waiting locks list" on clients. */
532                 CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
533                 return 0;
534         }
535
536         cfs_spin_lock_bh(&waiting_locks_spinlock);
537         ret = __ldlm_del_waiting_lock(lock);
538         cfs_spin_unlock_bh(&waiting_locks_spinlock);
539
540         /* remove the lock from the export's blocking list */
541         cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
542         cfs_list_del_init(&lock->l_exp_list);
543         cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
544
545         if (ret) {
546                 /* release lock ref if it has indeed been removed
547                  * from a list */
548                 LDLM_LOCK_RELEASE(lock);
549         }
550
551         LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
552         return ret;
553 }
554
555 /*
556  * Prolong the callback timeout of a lock already on the waiting list.
557  *
558  * Called with namespace lock held.
559  */
560 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
561 {
562         if (lock->l_export == NULL) {
563                 /* We don't have a "waiting locks list" on clients. */
564                 LDLM_DEBUG(lock, "client lock: no-op");
565                 return 0;
566         }
567
568         cfs_spin_lock_bh(&waiting_locks_spinlock);
569
570         if (cfs_list_empty(&lock->l_pending_chain)) {
571                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
572                 LDLM_DEBUG(lock, "wasn't waiting");
573                 return 0;
574         }
575
576         /* we remove/add the lock to the waiting list, so no need to
577          * release/take a lock reference */
578         __ldlm_del_waiting_lock(lock);
579         __ldlm_add_waiting_lock(lock, timeout);
580         cfs_spin_unlock_bh(&waiting_locks_spinlock);
581
582         LDLM_DEBUG(lock, "refreshed");
583         return 1;
584 }
585
586 #else /* !HAVE_SERVER_SUPPORT || !__KERNEL__ */
587
588 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
589 {
590         RETURN(0);
591 }
592
593 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
594 {
595         RETURN(0);
596 }
597
598 # ifdef HAVE_SERVER_SUPPORT
599 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
600 {
601         LASSERT(lock->l_res_locked);
602         LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
603         RETURN(1);
604 }
605
606 # endif
607 #endif /* HAVE_SERVER_SUPPORT && __KERNEL__ */
608
609 #ifdef HAVE_SERVER_SUPPORT
610
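/* A client did not reply to an AST in time: log a console error and
 * hand the lock over to the expired lock thread, which will fail the
 * client's export (i.e. evict the client). */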
611 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
612                             const char *ast_type)
613 {
614         LCONSOLE_ERROR_MSG(0x138, "%s: A client on nid %s was evicted due "
615                            "to a lock %s callback time out: rc %d\n",
616                            lock->l_export->exp_obd->obd_name,
617                            obd_export_nid2str(lock->l_export), ast_type, rc);
618
619         if (obd_dump_on_timeout)
620                 libcfs_debug_dumplog();
621 #ifdef __KERNEL__
622         cfs_spin_lock_bh(&waiting_locks_spinlock);
623         if (__ldlm_del_waiting_lock(lock) == 0)
624                 /* the lock was not in any list, grab an extra ref before adding
625                  * the lock to the expired list */
626                 LDLM_LOCK_GET(lock);
627         cfs_list_add(&lock->l_pending_chain,
628                      &expired_lock_thread.elt_expired_locks);
629         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
630         cfs_spin_unlock_bh(&waiting_locks_spinlock);
631 #else
632         class_fail_export(lock->l_export);
633 #endif
634 }
635
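/* Handle an error returned by an AST RPC: depending on the error and
 * the lock state, either cancel the lock (returning -ERESTART so the
 * caller restarts reprocessing) or evict the client through
 * ldlm_failed_ast(). */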
636 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
637                                  struct ptlrpc_request *req, int rc,
638                                  const char *ast_type)
639 {
640         lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
641
642         if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
643                 LASSERT(lock->l_export);
644                 if (lock->l_export->exp_libclient) {
645                         LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
646                                    " timeout, just cancelling lock", ast_type,
647                                    libcfs_nid2str(peer.nid));
648                         ldlm_lock_cancel(lock);
649                         rc = -ERESTART;
650                 } else if (lock->l_flags & LDLM_FL_CANCEL) {
651                         LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
652                                    "cancel was received (AST reply lost?)",
653                                    ast_type, libcfs_nid2str(peer.nid));
654                         ldlm_lock_cancel(lock);
655                         rc = -ERESTART;
656                 } else {
657                         ldlm_del_waiting_lock(lock);
658                         ldlm_failed_ast(lock, rc, ast_type);
659                 }
660         } else if (rc) {
661                 if (rc == -EINVAL) {
662                         struct ldlm_resource *res = lock->l_resource;
663                         LDLM_DEBUG(lock, "client (nid %s) returned %d"
664                                " from %s AST - normal race",
665                                libcfs_nid2str(peer.nid),
666                                req->rq_repmsg ?
667                                lustre_msg_get_status(req->rq_repmsg) : -1,
668                                ast_type);
669                         if (res) {
670                                 /* update lvbo to return proper attributes.
671                                  * see bug 23174 */
672                                 ldlm_resource_getref(res);
673                                 ldlm_res_lvbo_update(res, NULL, 1);
674                                 ldlm_resource_putref(res);
675                         }
676
677                 } else {
678                         LDLM_ERROR(lock, "client (nid %s) returned %d "
679                                    "from %s AST", libcfs_nid2str(peer.nid),
680                                    (req->rq_repmsg != NULL) ?
681                                    lustre_msg_get_status(req->rq_repmsg) : 0,
682                                    ast_type);
683                 }
684                 ldlm_lock_cancel(lock);
685                 /* Server-side AST functions are called from ldlm_reprocess_all,
686                  * which needs to be told to please restart its reprocessing. */
687                 rc = -ERESTART;
688         }
689
690         return rc;
691 }
692
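/* Reply interpreter shared by the blocking, completion and glimpse
 * ASTs below: update the LVB on glimpse replies, funnel failures into
 * ldlm_handle_ast_error(), and drop the reference taken in
 * ldlm_ast_fini(). */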
693 static int ldlm_cb_interpret(const struct lu_env *env,
694                              struct ptlrpc_request *req, void *data, int rc)
695 {
696         struct ldlm_cb_async_args *ca   = data;
697         struct ldlm_lock          *lock = ca->ca_lock;
698         struct ldlm_cb_set_arg    *arg  = ca->ca_set_arg;
699         ENTRY;
700
701         LASSERT(lock != NULL);
702
703         switch (arg->type) {
704         case LDLM_GL_CALLBACK:
705                 /* Update the LVB from disk if the AST failed
706                  * (this is a legal race)
707                  *
708                  * - Glimpse callback of local lock just returns
709                  *   -ELDLM_NO_LOCK_DATA.
710                  * - Glimpse callback of remote lock might return
711                  *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
712                  */
713                 if (rc == -ELDLM_NO_LOCK_DATA) {
714                         LDLM_DEBUG(lock, "lost race - client has a lock but no "
715                                    "inode");
716                         ldlm_res_lvbo_update(lock->l_resource, NULL, 1);
717                 } else if (rc != 0) {
718                         rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
719                 } else {
720                         rc = ldlm_res_lvbo_update(lock->l_resource, req, 1);
721                 }
722                 break;
723         case LDLM_BL_CALLBACK:
724                 if (rc != 0)
725                         rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
726                 break;
727         case LDLM_CP_CALLBACK:
728                 if (rc != 0)
729                         rc = ldlm_handle_ast_error(lock, req, rc, "completion");
730                 break;
731         default:
732                 LDLM_ERROR(lock, "invalid opcode for lock callback %d",
733                            arg->type);
734                 LBUG();
735         }
736
737         /* release extra reference taken in ldlm_ast_fini() */
738         LDLM_LOCK_RELEASE(lock);
739
740         if (rc == -ERESTART)
741                 cfs_atomic_inc(&arg->restart);
742
743         RETURN(0);
744 }
745
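/* Finish sending an AST: for instant cancel the request is sent
 * directly and freed, otherwise it is added to the shared request set
 * with an extra lock reference that ldlm_cb_interpret() releases. */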
746 static inline int ldlm_ast_fini(struct ptlrpc_request *req,
747                                 struct ldlm_cb_set_arg *arg,
748                                 struct ldlm_lock *lock,
749                                 int instant_cancel)
750 {
751         int rc = 0;
752         ENTRY;
753
754         if (unlikely(instant_cancel)) {
755                 rc = ptl_send_rpc(req, 1);
756                 ptlrpc_req_finished(req);
757                 if (rc == 0)
758                         cfs_atomic_inc(&arg->restart);
759         } else {
760                 LDLM_LOCK_GET(lock);
761                 ptlrpc_set_add_req(arg->set, req);
762         }
763
764         RETURN(rc);
765 }
766
767 /**
768  * Check if there are requests in the export request list which prevent
769  * the lock from being cancelled, and make those requests high priority.
770  */
771 static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
772 {
773         struct ptlrpc_request *req;
774         ENTRY;
775
776         if (lock->l_export == NULL) {
777                 LDLM_DEBUG(lock, "client lock: no-op");
778                 RETURN_EXIT;
779         }
780
781         cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
782         cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
783                                 rq_exp_list) {
784                 /* Do not process requests that have not yet been added to the
785                  * incoming queue or have already been removed from it for
786                  * processing */
787                 if (!req->rq_hp && !cfs_list_empty(&req->rq_list) &&
788                     req->rq_ops->hpreq_lock_match &&
789                     req->rq_ops->hpreq_lock_match(req, lock))
790                         ptlrpc_hpreq_reorder(req);
791         }
792         cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
793         EXIT;
794 }
795
796 /*
797  * ->l_blocking_ast() method for server-side locks. This is invoked when a
798  * newly enqueued server lock conflicts with the given one.
799  *
800  * Sends a blocking AST RPC to the client owning that lock; arms the timeout
801  * timer to wait for the client's response.
802  */
803 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
804                              struct ldlm_lock_desc *desc,
805                              void *data, int flag)
806 {
807         struct ldlm_cb_async_args *ca;
808         struct ldlm_cb_set_arg *arg = data;
809         struct ldlm_request    *body;
810         struct ptlrpc_request  *req;
811         int                     instant_cancel = 0;
812         int                     rc = 0;
813         ENTRY;
814
815         if (flag == LDLM_CB_CANCELING)
816                 /* Don't need to do anything here. */
817                 RETURN(0);
818
819         LASSERT(lock);
820         LASSERT(data != NULL);
821         if (lock->l_export->exp_obd->obd_recovering != 0)
822                 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
823
824         ldlm_lock_reorder_req(lock);
825
826         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
827                                         &RQF_LDLM_BL_CALLBACK,
828                                         LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
829         if (req == NULL)
830                 RETURN(-ENOMEM);
831
832         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
833         ca = ptlrpc_req_async_args(req);
834         ca->ca_set_arg = arg;
835         ca->ca_lock = lock;
836
837         req->rq_interpret_reply = ldlm_cb_interpret;
838         req->rq_no_resend = 1;
839
840         lock_res_and_lock(lock);
841         if (lock->l_granted_mode != lock->l_req_mode) {
842                 /* this blocking AST will be communicated as part of the
843                  * completion AST instead */
844                 unlock_res_and_lock(lock);
845
846                 ptlrpc_req_finished(req);
847                 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
848                 RETURN(0);
849         }
850
851         if (lock->l_destroyed) {
852                 /* What's the point? */
853                 unlock_res_and_lock(lock);
854                 ptlrpc_req_finished(req);
855                 RETURN(0);
856         }
857
858         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
859                 instant_cancel = 1;
860
861         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
862         body->lock_handle[0] = lock->l_remote_handle;
863         body->lock_desc = *desc;
864         body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
865
866         LDLM_DEBUG(lock, "server preparing blocking AST");
867
868         ptlrpc_request_set_replen(req);
869         if (instant_cancel) {
870                 unlock_res_and_lock(lock);
871                 ldlm_lock_cancel(lock);
872         } else {
873                 LASSERT(lock->l_granted_mode == lock->l_req_mode);
874                 ldlm_add_waiting_lock(lock);
875                 unlock_res_and_lock(lock);
876         }
877
878         req->rq_send_state = LUSTRE_IMP_FULL;
879         /* ptlrpc_request_alloc_pack already set timeout */
880         if (AT_OFF)
881                 req->rq_timeout = ldlm_get_rq_timeout();
882
883         if (lock->l_export && lock->l_export->exp_nid_stats &&
884             lock->l_export->exp_nid_stats->nid_ldlm_stats)
885                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
886                                      LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
887
888         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
889
890         RETURN(rc);
891 }
892
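/* Server-side completion AST: tell the client its lock was granted,
 * carrying the current LVB and, if a blocking AST was already sent,
 * the corresponding flags; CANCEL_ON_BLOCK locks are cancelled right
 * away in that case. */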
893 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
894 {
895         struct ldlm_cb_set_arg *arg = data;
896         struct ldlm_request    *body;
897         struct ptlrpc_request  *req;
898         struct ldlm_cb_async_args *ca;
899         long                    total_enqueue_wait;
900         int                     instant_cancel = 0;
901         int                     rc = 0;
902         int                     lvb_len;
903         ENTRY;
904
905         LASSERT(lock != NULL);
906         LASSERT(data != NULL);
907
908         total_enqueue_wait = cfs_time_sub(cfs_time_current_sec(),
909                                           lock->l_last_activity);
910
911         req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
912                                     &RQF_LDLM_CP_CALLBACK);
913         if (req == NULL)
914                 RETURN(-ENOMEM);
915
916         /* server namespace, doesn't need lock */
917         lvb_len = ldlm_lvbo_size(lock);
918         if (lvb_len > 0)
919                  req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
920                                       lvb_len);
921
922         rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
923         if (rc) {
924                 ptlrpc_request_free(req);
925                 RETURN(rc);
926         }
927
928         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
929         ca = ptlrpc_req_async_args(req);
930         ca->ca_set_arg = arg;
931         ca->ca_lock = lock;
932
933         req->rq_interpret_reply = ldlm_cb_interpret;
934         req->rq_no_resend = 1;
935         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
936
937         body->lock_handle[0] = lock->l_remote_handle;
938         body->lock_flags = flags;
939         ldlm_lock2desc(lock, &body->lock_desc);
940         if (lvb_len > 0) {
941                 void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
942
943                 lvb_len = ldlm_lvbo_fill(lock, lvb, lvb_len);
944                 req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
945                                    lvb_len, RCL_CLIENT);
946         }
947
948         LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
949                    total_enqueue_wait);
950
951         /* Server-side enqueue wait time estimate, used in
952             __ldlm_add_waiting_lock to set future enqueue timers */
953         if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
954                 at_measured(ldlm_lock_to_ns_at(lock),
955                             total_enqueue_wait);
956         else
957                 /* bz18618. Don't add lock enqueue time we spend waiting for a
958                    previous callback to fail. Locks waiting legitimately will
959                    get extended by ldlm_refresh_waiting_lock regardless of the
960                    estimate, so it's okay to underestimate here. */
961                 LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
962                        "It is likely that a previous callback timed out.",
963                        total_enqueue_wait,
964                        at_get(ldlm_lock_to_ns_at(lock)));
965
966         ptlrpc_request_set_replen(req);
967
968         req->rq_send_state = LUSTRE_IMP_FULL;
969         /* ptlrpc_request_pack already set timeout */
970         if (AT_OFF)
971                 req->rq_timeout = ldlm_get_rq_timeout();
972
973         /* We only send real blocking ASTs after the lock is granted */
974         lock_res_and_lock(lock);
975         if (lock->l_flags & LDLM_FL_AST_SENT) {
976                 body->lock_flags |= LDLM_FL_AST_SENT;
977                 /* copy ast flags like LDLM_FL_DISCARD_DATA */
978                 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
979
980                 /* We might get here before ldlm_handle_enqueue() has set the
981                  * LDLM_FL_CANCEL_ON_BLOCK flag. In that case we put this lock
982                  * on the waiting list, but this is safe: the matching code in
983                  * ldlm_handle_enqueue() will still call ldlm_lock_cancel(),
984                  * which not only cancels the lock but also removes it from
985                  * the waiting list */
986                 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
987                         unlock_res_and_lock(lock);
988                         ldlm_lock_cancel(lock);
989                         instant_cancel = 1;
990                         lock_res_and_lock(lock);
991                 } else {
992                         /* start the lock-timeout clock */
993                         ldlm_add_waiting_lock(lock);
994                 }
995         }
996         unlock_res_and_lock(lock);
997
998         if (lock->l_export && lock->l_export->exp_nid_stats &&
999             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1000                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1001                                      LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
1002
1003         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
1004
1005         RETURN(rc);
1006 }
1007
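/* Server-side glimpse AST: ask the client holding the lock to report
 * its up-to-date LVB, optionally packing a glimpse descriptor supplied
 * by the caller. */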
1008 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
1009 {
1010         struct ldlm_cb_set_arg          *arg = data;
1011         struct ldlm_request             *body;
1012         struct ptlrpc_request           *req;
1013         struct ldlm_cb_async_args       *ca;
1014         int                              rc;
1015         struct req_format               *req_fmt;
1016         ENTRY;
1017
1018         LASSERT(lock != NULL);
1019
1020         if (arg->gl_desc != NULL)
1021                 /* There is a glimpse descriptor to pack */
1022                 req_fmt = &RQF_LDLM_GL_DESC_CALLBACK;
1023         else
1024                 req_fmt = &RQF_LDLM_GL_CALLBACK;
1025
1026         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
1027                                         req_fmt, LUSTRE_DLM_VERSION,
1028                                         LDLM_GL_CALLBACK);
1029
1030         if (req == NULL)
1031                 RETURN(-ENOMEM);
1032
1033         if (arg->gl_desc != NULL) {
1034                 /* copy the GL descriptor */
1035                 union ldlm_gl_desc      *desc;
1036                 desc = req_capsule_client_get(&req->rq_pill, &RMF_DLM_GL_DESC);
1037                 *desc = *arg->gl_desc;
1038         }
1039
1040         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1041         body->lock_handle[0] = lock->l_remote_handle;
1042         ldlm_lock2desc(lock, &body->lock_desc);
1043
1044         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
1045         ca = ptlrpc_req_async_args(req);
1046         ca->ca_set_arg = arg;
1047         ca->ca_lock = lock;
1048
1049         /* server namespace, doesn't need lock */
1050         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1051                              ldlm_lvbo_size(lock));
1052         ptlrpc_request_set_replen(req);
1053
1054         req->rq_send_state = LUSTRE_IMP_FULL;
1055         /* ptlrpc_request_alloc_pack already set timeout */
1056         if (AT_OFF)
1057                 req->rq_timeout = ldlm_get_rq_timeout();
1058
1059         req->rq_interpret_reply = ldlm_cb_interpret;
1060
1061         if (lock->l_export && lock->l_export->exp_nid_stats &&
1062             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1063                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1064                                      LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
1065
1066         rc = ldlm_ast_fini(req, arg, lock, 0);
1067
1068         RETURN(rc);
1069 }
1070
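/* Send glimpse ASTs for every lock on gl_work_list and reprocess the
 * resource if any of them asks for a restart. */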
1071 int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list)
1072 {
1073         int     rc;
1074         ENTRY;
1075
1076         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
1077                                LDLM_WORK_GL_AST);
1078         if (rc == -ERESTART)
1079                 ldlm_reprocess_all(res);
1080
1081         RETURN(rc);
1082 }
1083
1084 /* return ldlm lock associated with a lock callback request */
1085 struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req)
1086 {
1087         struct ldlm_cb_async_args       *ca;
1088         struct ldlm_lock                *lock;
1089         ENTRY;
1090
1091         ca = ptlrpc_req_async_args(req);
1092         lock = ca->ca_lock;
1093         if (lock == NULL)
1094                 RETURN(ERR_PTR(-EFAULT));
1095
1096         RETURN(lock);
1097 }
1098 EXPORT_SYMBOL(ldlm_request_lock);
1099
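/* Bump the per-service enqueue counter matching the lock type (and,
 * for extent locks, the intent flag) of an incoming request. */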
1100 static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
1101                        struct lprocfs_stats *srv_stats)
1102 {
1103         int lock_type = 0, op = 0;
1104
1105         lock_type = dlm_req->lock_desc.l_resource.lr_type;
1106
1107         switch (lock_type) {
1108         case LDLM_PLAIN:
1109                 op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
1110                 break;
1111         case LDLM_EXTENT:
1112                 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
1113                         op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
1114                 else
1115                         op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
1116                 break;
1117         case LDLM_FLOCK:
1118                 op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
1119                 break;
1120         case LDLM_IBITS:
1121                 op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
1122                 break;
1123         default:
1124                 op = 0;
1125                 break;
1126         }
1127
1128         if (op)
1129                 lprocfs_counter_incr(srv_stats, op);
1130
1131         return;
1132 }
1133
1134 /*
1135  * Main server-side entry point into LDLM. This is called by ptlrpc service
1136  * threads to carry out client lock enqueueing requests.
1137  */
1138 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
1139                          struct ptlrpc_request *req,
1140                          const struct ldlm_request *dlm_req,
1141                          const struct ldlm_callback_suite *cbs)
1142 {
1143         struct ldlm_reply *dlm_rep;
1144         __u32 flags;
1145         ldlm_error_t err = ELDLM_OK;
1146         struct ldlm_lock *lock = NULL;
1147         void *cookie = NULL;
1148         int rc = 0;
1149         ENTRY;
1150
1151         LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
1152
1153         ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
1154         flags = dlm_req->lock_flags;
1155
1156         LASSERT(req->rq_export);
1157
1158         if (ptlrpc_req2svc(req)->srv_stats != NULL)
1159                 ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);
1160
1161         if (req->rq_export && req->rq_export->exp_nid_stats &&
1162             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1163                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1164                                      LDLM_ENQUEUE - LDLM_FIRST_OPC);
1165
1166         if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
1167                      dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
1168                 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
1169                           dlm_req->lock_desc.l_resource.lr_type);
1170                 GOTO(out, rc = -EFAULT);
1171         }
1172
1173         if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
1174                      dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
1175                      dlm_req->lock_desc.l_req_mode &
1176                      (dlm_req->lock_desc.l_req_mode-1))) {
1177                 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
1178                           dlm_req->lock_desc.l_req_mode);
1179                 GOTO(out, rc = -EFAULT);
1180         }
1181
1182         if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
1183                 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
1184                              LDLM_PLAIN)) {
1185                         DEBUG_REQ(D_ERROR, req,
1186                                   "PLAIN lock request from IBITS client?");
1187                         GOTO(out, rc = -EPROTO);
1188                 }
1189         } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
1190                             LDLM_IBITS)) {
1191                 DEBUG_REQ(D_ERROR, req,
1192                           "IBITS lock request from unaware client?");
1193                 GOTO(out, rc = -EPROTO);
1194         }
1195
1196 #if 0
1197         /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
1198            against server's _CONNECT_SUPPORTED flags? (I don't want to use
1199            ibits for mgc/mgs) */
1200
1201         /* INODEBITS_INTEROP: Perform conversion from plain lock to
1202          * inodebits lock if client does not support them. */
1203         if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
1204             (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
1205                 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
1206                 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
1207                         MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
1208                 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
1209                         dlm_req->lock_desc.l_req_mode = LCK_CR;
1210         }
1211 #endif
1212
1213         if (unlikely(flags & LDLM_FL_REPLAY)) {
1214                 /* Find an existing lock in the per-export lock hash */
1215                 /* In the function below, .hs_keycmp resolves to
1216                  * ldlm_export_lock_keycmp() */
1217                 /* coverity[overrun-buffer-val] */
1218                 lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
1219                                        (void *)&dlm_req->lock_handle[0]);
1220                 if (lock != NULL) {
1221                         DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
1222                                   LPX64, lock->l_handle.h_cookie);
1223                         GOTO(existing_lock, rc = 0);
1224                 }
1225         }
1226
1227         /* The lock's callback data might be set in the policy function */
1228         lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
1229                                 dlm_req->lock_desc.l_resource.lr_type,
1230                                 dlm_req->lock_desc.l_req_mode,
1231                                 cbs, NULL, 0);
1232
1233         if (!lock)
1234                 GOTO(out, rc = -ENOMEM);
1235
1236         lock->l_last_activity = cfs_time_current_sec();
1237         lock->l_remote_handle = dlm_req->lock_handle[0];
1238         LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
1239
1240         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
1241         /* Don't enqueue a lock onto the export if it has been disconnected
1242          * due to eviction (bug 3822) or server umount (bug 24324).
1243          * Cancel it now instead. */
1244         if (req->rq_export->exp_disconnected) {
1245                 LDLM_ERROR(lock, "lock on disconnected export %p",
1246                            req->rq_export);
1247                 GOTO(out, rc = -ENOTCONN);
1248         }
1249
1250         lock->l_export = class_export_lock_get(req->rq_export, lock);
1251         if (lock->l_export->exp_lock_hash)
1252                 cfs_hash_add(lock->l_export->exp_lock_hash,
1253                              &lock->l_remote_handle,
1254                              &lock->l_exp_hash);
1255
1256 existing_lock:
1257
1258         if (flags & LDLM_FL_HAS_INTENT) {
1259                 /* In this case, the reply buffer is allocated deep in
1260                  * local_lock_enqueue by the policy function. */
1261                 cookie = req;
1262         } else {
1263                 /* Based on the assumption that the LVB size never changes
1264                  * during the resource lifetime; otherwise it would need the
1265                  * protection of resource->lr_lock */
1266                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
1267                                      RCL_SERVER, ldlm_lvbo_size(lock));
1268
1269                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
1270                         GOTO(out, rc = -ENOMEM);
1271
1272                 rc = req_capsule_server_pack(&req->rq_pill);
1273                 if (rc)
1274                         GOTO(out, rc);
1275         }
1276
1277         if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
1278                 ldlm_convert_policy_to_local(req->rq_export,
1279                                           dlm_req->lock_desc.l_resource.lr_type,
1280                                           &dlm_req->lock_desc.l_policy_data,
1281                                           &lock->l_policy_data);
1282         if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
1283                 lock->l_req_extent = lock->l_policy_data.l_extent;
1284
1285         err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
1286         if (err)
1287                 GOTO(out, err);
1288
1289         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1290         dlm_rep->lock_flags = flags;
1291
1292         ldlm_lock2desc(lock, &dlm_rep->lock_desc);
1293         ldlm_lock2handle(lock, &dlm_rep->lock_handle);
1294
1295         /* We never send a blocking AST until the lock is granted, but
1296          * we can tell it right now */
1297         lock_res_and_lock(lock);
1298
1299         /* Now take into account flags to be inherited from original lock
1300            request both in reply to client and in our own lock flags. */
1301         dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
1302         lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
1303
1304         /* Don't move a pending lock onto the export if it has already been
1305          * disconnected due to eviction (bug 5683) or server umount (bug 24324).
1306          * Cancel it now instead. */
1307         if (unlikely(req->rq_export->exp_disconnected ||
1308                      OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
1309                 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
1310                 rc = -ENOTCONN;
1311         } else if (lock->l_flags & LDLM_FL_AST_SENT) {
1312                 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
1313                 if (lock->l_granted_mode == lock->l_req_mode) {
1314                         /*
1315                          * Only cancel lock if it was granted, because it would
1316                          * be destroyed immediately and would never be granted
1317                          * in the future, causing timeouts on client.  Not
1318                          * granted lock will be cancelled immediately after
1319                          * sending completion AST.
1320                          */
1321                         if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
1322                                 unlock_res_and_lock(lock);
1323                                 ldlm_lock_cancel(lock);
1324                                 lock_res_and_lock(lock);
1325                         } else
1326                                 ldlm_add_waiting_lock(lock);
1327                 }
1328         }
1329         /* Make sure we never ever grant usual metadata locks to liblustre
1330            clients */
1331         if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
1332             dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
1333              req->rq_export->exp_libclient) {
1334                 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
1335                              !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
1336                         CERROR("Granting sync lock to libclient. "
1337                                "req fl %d, rep fl %d, lock fl "LPX64"\n",
1338                                dlm_req->lock_flags, dlm_rep->lock_flags,
1339                                lock->l_flags);
1340                         LDLM_ERROR(lock, "sync lock");
1341                         if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
1342                                 struct ldlm_intent *it;
1343
1344                                 it = req_capsule_client_get(&req->rq_pill,
1345                                                             &RMF_LDLM_INTENT);
1346                                 if (it != NULL) {
1347                                         CERROR("This is intent %s ("LPU64")\n",
1348                                                ldlm_it2str(it->opc), it->opc);
1349                                 }
1350                         }
1351                 }
1352         }
1353
1354         unlock_res_and_lock(lock);
1355
1356         EXIT;
1357  out:
1358         req->rq_status = rc ?: err; /* return either error - bug 11190 */
1359         if (!req->rq_packed_final) {
1360                 err = lustre_pack_reply(req, 1, NULL, NULL);
1361                 if (rc == 0)
1362                         rc = err;
1363         }
1364
1365         /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1366          * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
1367         if (lock) {
1368                 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
1369                            "(err=%d, rc=%d)", err, rc);
1370
1371                 if (rc == 0) {
1372                         int lvb_len = ldlm_lvbo_size(lock);
1373
1374                         if (lvb_len > 0) {
1375                                 void *buf;
1376                                 int buflen;
1377
1378                                 buf = req_capsule_server_get(&req->rq_pill,
1379                                                              &RMF_DLM_LVB);
1380                                 LASSERTF(buf != NULL, "req %p, lock %p\n",
1381                                          req, lock);
1382                                 buflen = req_capsule_get_size(&req->rq_pill,
1383                                                 &RMF_DLM_LVB, RCL_SERVER);
1384                                 buflen = ldlm_lvbo_fill(lock, buf, buflen);
1385                                 req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
1386                                                    buflen, RCL_SERVER);
1387                         }
1388                 } else {
1389                         lock_res_and_lock(lock);
1390                         ldlm_resource_unlink_lock(lock);
1391                         ldlm_lock_destroy_nolock(lock);
1392                         unlock_res_and_lock(lock);
1393                 }
1394
1395                 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1396                         ldlm_reprocess_all(lock->l_resource);
1397
1398                 LDLM_LOCK_RELEASE(lock);
1399         }
1400
1401         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1402                           lock, rc);
1403
1404         return rc;
1405 }
1406
1407 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1408                         ldlm_completion_callback completion_callback,
1409                         ldlm_blocking_callback blocking_callback,
1410                         ldlm_glimpse_callback glimpse_callback)
1411 {
1412         struct ldlm_request *dlm_req;
1413         struct ldlm_callback_suite cbs = {
1414                 .lcs_completion = completion_callback,
1415                 .lcs_blocking   = blocking_callback,
1416                 .lcs_glimpse    = glimpse_callback
1417         };
1418         int rc;
1419
1420         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1421         if (dlm_req != NULL) {
1422                 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1423                                           req, dlm_req, &cbs);
1424         } else {
1425                 rc = -EFAULT;
1426         }
1427         return rc;
1428 }
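/*
 * Illustrative sketch (not part of the original file): a server-side target
 * normally wires its AST handlers in through this wrapper from its request
 * handler.  The function name "example_target_enqueue" is hypothetical; the
 * three ldlm_server_* ASTs are the generic server-side handlers provided by
 * this layer.
 *
 *      static int example_target_enqueue(struct ptlrpc_request *req)
 *      {
 *              return ldlm_handle_enqueue(req,
 *                                         ldlm_server_completion_ast,
 *                                         ldlm_server_blocking_ast,
 *                                         ldlm_server_glimpse_ast);
 *      }
 */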
1429
1430 int ldlm_handle_convert0(struct ptlrpc_request *req,
1431                          const struct ldlm_request *dlm_req)
1432 {
1433         struct ldlm_reply *dlm_rep;
1434         struct ldlm_lock *lock;
1435         int rc;
1436         ENTRY;
1437
1438         if (req->rq_export && req->rq_export->exp_nid_stats &&
1439             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1440                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1441                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1442
1443         rc = req_capsule_server_pack(&req->rq_pill);
1444         if (rc)
1445                 RETURN(rc);
1446
1447         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1448         dlm_rep->lock_flags = dlm_req->lock_flags;
1449
1450         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1451         if (!lock) {
1452                 req->rq_status = EINVAL;
1453         } else {
1454                 void *res = NULL;
1455
1456                 LDLM_DEBUG(lock, "server-side convert handler START");
1457
1458                 lock->l_last_activity = cfs_time_current_sec();
1459                 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1460                                         &dlm_rep->lock_flags);
1461                 if (res) {
1462                         if (ldlm_del_waiting_lock(lock))
1463                                 LDLM_DEBUG(lock, "converted waiting lock");
1464                         req->rq_status = 0;
1465                 } else {
1466                         req->rq_status = EDEADLOCK;
1467                 }
1468         }
1469
1470         if (lock) {
1471                 if (!req->rq_status)
1472                         ldlm_reprocess_all(lock->l_resource);
1473                 LDLM_DEBUG(lock, "server-side convert handler END");
1474                 LDLM_LOCK_PUT(lock);
1475         } else
1476                 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1477
1478         RETURN(0);
1479 }
1480
1481 int ldlm_handle_convert(struct ptlrpc_request *req)
1482 {
1483         int rc;
1484         struct ldlm_request *dlm_req;
1485
1486         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1487         if (dlm_req != NULL) {
1488                 rc = ldlm_handle_convert0(req, dlm_req);
1489         } else {
1490                 CERROR("Can't unpack dlm_req\n");
1491                 rc = -EFAULT;
1492         }
1493         return rc;
1494 }
1495
1496 /* Cancel all the locks whose handles are packed into the ldlm_request */
1497 int ldlm_request_cancel(struct ptlrpc_request *req,
1498                         const struct ldlm_request *dlm_req, int first)
1499 {
1500         struct ldlm_resource *res, *pres = NULL;
1501         struct ldlm_lock *lock;
1502         int i, count, done = 0;
1503         ENTRY;
1504
1505         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1506         if (first >= count)
1507                 RETURN(0);
1508
1509         /* There are no locks on the server at replay time, so skip the
1510          * lock cancelling to let the replay tests pass. */
1511         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1512                 RETURN(0);
1513
1514         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1515                           "starting at %d", count, first);
1516
1517         for (i = first; i < count; i++) {
1518                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1519                 if (!lock) {
1520                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1521                                           "lock (cookie "LPU64")",
1522                                           dlm_req->lock_handle[i].cookie);
1523                         continue;
1524                 }
1525
1526                 res = lock->l_resource;
1527                 done++;
1528
1529                 if (res != pres) {
1530                         if (pres != NULL) {
1531                                 ldlm_reprocess_all(pres);
1532                                 LDLM_RESOURCE_DELREF(pres);
1533                                 ldlm_resource_putref(pres);
1534                         }
1535                         if (res != NULL) {
1536                                 ldlm_resource_getref(res);
1537                                 LDLM_RESOURCE_ADDREF(res);
1538                                 ldlm_res_lvbo_update(res, NULL, 1);
1539                         }
1540                         pres = res;
1541                 }
1542                 ldlm_lock_cancel(lock);
1543                 LDLM_LOCK_PUT(lock);
1544         }
1545         if (pres != NULL) {
1546                 ldlm_reprocess_all(pres);
1547                 LDLM_RESOURCE_DELREF(pres);
1548                 ldlm_resource_putref(pres);
1549         }
1550         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1551         RETURN(done);
1552 }
1553
1554 int ldlm_handle_cancel(struct ptlrpc_request *req)
1555 {
1556         struct ldlm_request *dlm_req;
1557         int rc;
1558         ENTRY;
1559
1560         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1561         if (dlm_req == NULL) {
1562                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1563                 RETURN(-EFAULT);
1564         }
1565
1566         if (req->rq_export && req->rq_export->exp_nid_stats &&
1567             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1568                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1569                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1570
1571         rc = req_capsule_server_pack(&req->rq_pill);
1572         if (rc)
1573                 RETURN(rc);
1574
1575         if (!ldlm_request_cancel(req, dlm_req, 0))
1576                 req->rq_status = ESTALE;
1577
1578         RETURN(ptlrpc_reply(req));
1579 }
1580 #endif /* HAVE_SERVER_SUPPORT */
1581
1582 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1583                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1584 {
1585         int do_ast;
1586         ENTRY;
1587
1588         LDLM_DEBUG(lock, "client blocking AST callback handler");
1589
1590         lock_res_and_lock(lock);
1591         lock->l_flags |= LDLM_FL_CBPENDING;
1592
1593         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1594                 lock->l_flags |= LDLM_FL_CANCEL;
1595
1596         do_ast = (!lock->l_readers && !lock->l_writers);
1597         unlock_res_and_lock(lock);
1598
1599         if (do_ast) {
1600                 CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
1601                        lock, lock->l_blocking_ast);
1602                 if (lock->l_blocking_ast != NULL)
1603                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1604                                              LDLM_CB_BLOCKING);
1605         } else {
1606                 CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
1607                        lock);
1608         }
1609
1610         LDLM_DEBUG(lock, "client blocking callback handler END");
1611         LDLM_LOCK_RELEASE(lock);
1612         EXIT;
1613 }
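/*
 * Illustrative sketch (an assumption, not code from this file): the
 * l_blocking_ast invoked above is registered by the client layer at enqueue
 * time.  For the LDLM_CB_BLOCKING case it typically just cancels the lock by
 * handle; "example_blocking_ast" is a hypothetical name.
 *
 *      static int example_blocking_ast(struct ldlm_lock *lock,
 *                                      struct ldlm_lock_desc *desc,
 *                                      void *data, int flag)
 *      {
 *              struct lustre_handle lockh;
 *
 *              if (flag == LDLM_CB_BLOCKING) {
 *                      ldlm_lock2handle(lock, &lockh);
 *                      return ldlm_cli_cancel(&lockh);
 *              }
 *              return 0;
 *      }
 */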
1614
1615 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1616                                     struct ldlm_namespace *ns,
1617                                     struct ldlm_request *dlm_req,
1618                                     struct ldlm_lock *lock)
1619 {
1620         int lvb_len;
1621         CFS_LIST_HEAD(ast_list);
1622         ENTRY;
1623
1624         LDLM_DEBUG(lock, "client completion callback handler START");
1625
1626         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1627                 int to = cfs_time_seconds(1);
1628                 while (to > 0) {
1629                         cfs_schedule_timeout_and_set_state(
1630                                 CFS_TASK_INTERRUPTIBLE, to);
1631                         if (lock->l_granted_mode == lock->l_req_mode ||
1632                             lock->l_destroyed)
1633                                 break;
1634                 }
1635         }
1636
1637         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1638         if (lvb_len > 0) {
1639                 if (lock->l_lvb_len > 0) {
1640                         /* for an extent lock, the LVB contains an ost_lvb{}. */
1641                         LASSERT(lock->l_lvb_data != NULL);
1642                         LASSERTF(lock->l_lvb_len == lvb_len,
1643                                 "preallocated %d, actual %d.\n",
1644                                 lock->l_lvb_len, lvb_len);
1645                 } else { /* for a layout lock, the LVB has a variable length */
1646                         void *lvb_data;
1647
1648                         OBD_ALLOC(lvb_data, lvb_len);
1649                         if (lvb_data == NULL)
1650                                 LDLM_ERROR(lock, "no memory.\n");
1651
1652                         lock_res_and_lock(lock);
1653                         if (lvb_data == NULL) {
1654                                 lock->l_flags |= LDLM_FL_FAILED;
1655                         } else {
1656                                 LASSERT(lock->l_lvb_data == NULL);
1657                                 lock->l_lvb_data = lvb_data;
1658                                 lock->l_lvb_len = lvb_len;
1659                         }
1660                         unlock_res_and_lock(lock);
1661                 }
1662         }
1663
1664         lock_res_and_lock(lock);
1665         if (lock->l_destroyed ||
1666             lock->l_granted_mode == lock->l_req_mode) {
1667                 /* bug 11300: the lock has already been granted */
1668                 unlock_res_and_lock(lock);
1669                 LDLM_DEBUG(lock, "Double grant race happened");
1670                 LDLM_LOCK_RELEASE(lock);
1671                 EXIT;
1672                 return;
1673         }
1674
1675         /* If we receive the completion AST before the actual enqueue returned,
1676          * then we might need to switch lock modes, resources, or extents. */
1677         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1678                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1679                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1680         }
1681
1682         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1683                 ldlm_convert_policy_to_local(req->rq_export,
1684                                           dlm_req->lock_desc.l_resource.lr_type,
1685                                           &dlm_req->lock_desc.l_policy_data,
1686                                           &lock->l_policy_data);
1687                 LDLM_DEBUG(lock, "completion AST, new policy data");
1688         }
1689
1690         ldlm_resource_unlink_lock(lock);
1691         if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1692                    &lock->l_resource->lr_name,
1693                    sizeof(lock->l_resource->lr_name)) != 0) {
1694                 unlock_res_and_lock(lock);
1695                 if (ldlm_lock_change_resource(ns, lock,
1696                                 &dlm_req->lock_desc.l_resource.lr_name) != 0) {
1697                         LDLM_ERROR(lock, "Failed to allocate resource");
1698                         LDLM_LOCK_RELEASE(lock);
1699                         EXIT;
1700                         return;
1701                 }
1702                 LDLM_DEBUG(lock, "completion AST, new resource");
1703                 CERROR("change resource!\n");
1704                 lock_res_and_lock(lock);
1705         }
1706
1707         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1708                 /* BL_AST locks are not needed in the LRU;
1709                  * let ldlm_cancel_lru() stay fast. */
1710                 ldlm_lock_remove_from_lru(lock);
1711                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1712                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1713         }
1714
1715         if (lock->l_lvb_len) {
1716                 if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
1717                                          RCL_CLIENT) < lock->l_lvb_len) {
1718                         LDLM_ERROR(lock, "completion AST did not contain "
1719                                    "expected LVB!");
1720                 } else {
1721                         void *lvb = req_capsule_client_get(&req->rq_pill,
1722                                                            &RMF_DLM_LVB);
1723                         memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1724                 }
1725         }
1726
1727         ldlm_grant_lock(lock, &ast_list);
1728         unlock_res_and_lock(lock);
1729
1730         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1731
1732         /* Let the enqueue path call osc_lock_upcall() and initialize
1733          * l_ast_data. */
1734         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1735
1736         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1737
1738         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1739                           lock);
1740         LDLM_LOCK_RELEASE(lock);
1741         EXIT;
1742 }
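/*
 * For reference (a sketch from the standard wire headers, not from this
 * file, and possibly extended in newer releases): the ost_lvb{} carried in
 * an extent lock's LVB packs the attributes the client is allowed to cache:
 *
 *      struct ost_lvb {
 *              __u64 lvb_size;
 *              __u64 lvb_mtime;
 *              __u64 lvb_atime;
 *              __u64 lvb_ctime;
 *              __u64 lvb_blocks;
 *      };
 */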
1743
1744 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1745                                     struct ldlm_namespace *ns,
1746                                     struct ldlm_request *dlm_req,
1747                                     struct ldlm_lock *lock)
1748 {
1749         int rc = -ENOSYS;
1750         ENTRY;
1751
1752         LDLM_DEBUG(lock, "client glimpse AST callback handler");
1753
1754         if (lock->l_glimpse_ast != NULL)
1755                 rc = lock->l_glimpse_ast(lock, req);
1756
1757         if (req->rq_repmsg != NULL) {
1758                 ptlrpc_reply(req);
1759         } else {
1760                 req->rq_status = rc;
1761                 ptlrpc_error(req);
1762         }
1763
1764         lock_res_and_lock(lock);
1765         if (lock->l_granted_mode == LCK_PW &&
1766             !lock->l_readers && !lock->l_writers &&
1767             cfs_time_after(cfs_time_current(),
1768                            cfs_time_add(lock->l_last_used,
1769                                         cfs_time_seconds(10)))) {
1770                 unlock_res_and_lock(lock);
1771                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1772                         ldlm_handle_bl_callback(ns, NULL, lock);
1773
1774                 EXIT;
1775                 return;
1776         }
1777         unlock_res_and_lock(lock);
1778         LDLM_LOCK_RELEASE(lock);
1779         EXIT;
1780 }
1781
1782 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1783 {
1784         if (req->rq_no_reply)
1785                 return 0;
1786
1787         req->rq_status = rc;
1788         if (!req->rq_packed_final) {
1789                 rc = lustre_pack_reply(req, 1, NULL, NULL);
1790                 if (rc)
1791                         return rc;
1792         }
1793         return ptlrpc_reply(req);
1794 }
1795
1796 #ifdef __KERNEL__
1797 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
1798 {
1799         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1800         ENTRY;
1801
1802         cfs_spin_lock(&blp->blp_lock);
1803         if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
1804                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
1805                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
1806         } else {
1807                 /* other blocking callbacks are added to the regular list */
1808                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1809         }
1810         cfs_spin_unlock(&blp->blp_lock);
1811
1812         cfs_waitq_signal(&blp->blp_waitq);
1813
1814         /* cannot use blwi->blwi_mode here, as blwi may already have been
1815            freed in LDLM_ASYNC mode */
1816         if (mode == LDLM_SYNC)
1817                 cfs_wait_for_completion(&blwi->blwi_comp);
1818
1819         RETURN(0);
1820 }
1821
1822 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
1823                              struct ldlm_namespace *ns,
1824                              struct ldlm_lock_desc *ld,
1825                              cfs_list_t *cancels, int count,
1826                              struct ldlm_lock *lock,
1827                              int mode)
1828 {
1829         cfs_init_completion(&blwi->blwi_comp);
1830         CFS_INIT_LIST_HEAD(&blwi->blwi_head);
1831
1832         if (cfs_memory_pressure_get())
1833                 blwi->blwi_mem_pressure = 1;
1834
1835         blwi->blwi_ns = ns;
1836         blwi->blwi_mode = mode;
1837         if (ld != NULL)
1838                 blwi->blwi_ld = *ld;
1839         if (count) {
1840                 cfs_list_add(&blwi->blwi_head, cancels);
1841                 cfs_list_del_init(cancels);
1842                 blwi->blwi_count = count;
1843         } else {
1844                 blwi->blwi_lock = lock;
1845         }
1846 }
1847
1848 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1849                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1850                              cfs_list_t *cancels, int count, int mode)
1851 {
1852         ENTRY;
1853
1854         if (cancels && count == 0)
1855                 RETURN(0);
1856
1857         if (mode == LDLM_SYNC) {
1858                 /* for a synchronous call, do minimal memory allocation, as it
1859                  * could be triggered from the kernel shrinker
1860                  */
1861                 struct ldlm_bl_work_item blwi;
1862                 memset(&blwi, 0, sizeof(blwi));
1863                 init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
1864                 RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
1865         } else {
1866                 struct ldlm_bl_work_item *blwi;
1867                 OBD_ALLOC(blwi, sizeof(*blwi));
1868                 if (blwi == NULL)
1869                         RETURN(-ENOMEM);
1870                 init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
1871
1872                 RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
1873         }
1874 }
1875
1876 #endif
1877
1878 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1879                            struct ldlm_lock *lock)
1880 {
1881 #ifdef __KERNEL__
1882         RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
1883 #else
1884         RETURN(-ENOSYS);
1885 #endif
1886 }
1887
1888 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1889                            cfs_list_t *cancels, int count, int mode)
1890 {
1891 #ifdef __KERNEL__
1892         RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
1893 #else
1894         RETURN(-ENOSYS);
1895 #endif
1896 }
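/*
 * Illustrative sketch (an assumption, not code from this file): a typical
 * caller such as the LRU cancel path collects victim locks on a private
 * list and hands the whole batch to the blocking threads, falling back to
 * cancelling inline if the work could not be queued:
 *
 *      CFS_LIST_HEAD(cancels);
 *      int count = ...;        number of locks collected onto &cancels
 *
 *      if (ldlm_bl_to_thread_list(ns, NULL, &cancels, count, LDLM_ASYNC))
 *              ldlm_cli_cancel_list(&cancels, count, NULL, 0);
 */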
1897
1898 /* Setinfo coming from the server (e.g. MDT) to the client (e.g. MDC)! */
1899 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
1900 {
1901         struct obd_device *obd = req->rq_export->exp_obd;
1902         char *key;
1903         void *val;
1904         int keylen, vallen;
1905         int rc = -ENOSYS;
1906         ENTRY;
1907
1908         DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
1909
1910         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
1911
1912         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1913         if (key == NULL) {
1914                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
1915                 RETURN(-EFAULT);
1916         }
1917         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
1918                                       RCL_CLIENT);
1919         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1920         if (val == NULL) {
1921                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
1922                 RETURN(-EFAULT);
1923         }
1924         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
1925                                       RCL_CLIENT);
1926
1927         /* We are responsible for swabbing contents of val */
1928
1929         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
1930                 /* Pass it on to mdc (the "export" in this case) */
1931                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
1932                                         req->rq_export,
1933                                         sizeof(KEY_HSM_COPYTOOL_SEND),
1934                                         KEY_HSM_COPYTOOL_SEND,
1935                                         vallen, val, NULL);
1936         else
1937                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
1938
1939         return rc;
1940 }
1941
1942 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
1943                                         const char *msg, int rc,
1944                                         struct lustre_handle *handle)
1945 {
1946         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
1947                   "%s: [nid %s] [rc %d] [lock "LPX64"]",
1948                   msg, libcfs_id2str(req->rq_peer), rc,
1949                   handle ? handle->cookie : 0);
1950         if (req->rq_no_reply)
1951                 CWARN("No reply was sent, which may cause bug 21636.\n");
1952         else if (rc)
1953                 CWARN("Sending the reply failed, which may cause bug 21636.\n");
1954 }
1955
1956 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
1957 static int ldlm_callback_handler(struct ptlrpc_request *req)
1958 {
1959         struct ldlm_namespace *ns;
1960         struct ldlm_request *dlm_req;
1961         struct ldlm_lock *lock;
1962         int rc;
1963         ENTRY;
1964
1965         /* Requests arrive in sender's byte order.  The ptlrpc service
1966          * handler has already checked and, if necessary, byte-swapped the
1967          * incoming request message body, but I am responsible for the
1968          * message buffers. */
1969
1970         /* do nothing for sec context finalize */
1971         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
1972                 RETURN(0);
1973
1974         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
1975
1976         if (req->rq_export == NULL) {
1977                 rc = ldlm_callback_reply(req, -ENOTCONN);
1978                 ldlm_callback_errmsg(req, "Operate on unconnected server",
1979                                      rc, NULL);
1980                 RETURN(0);
1981         }
1982
1983         LASSERT(req->rq_export != NULL);
1984         LASSERT(req->rq_export->exp_obd != NULL);
1985
1986         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1987         case LDLM_BL_CALLBACK:
1988                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
1989                         RETURN(0);
1990                 break;
1991         case LDLM_CP_CALLBACK:
1992                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
1993                         RETURN(0);
1994                 break;
1995         case LDLM_GL_CALLBACK:
1996                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
1997                         RETURN(0);
1998                 break;
1999         case LDLM_SET_INFO:
2000                 rc = ldlm_handle_setinfo(req);
2001                 ldlm_callback_reply(req, rc);
2002                 RETURN(0);
2003         case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
2004                 CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
2005                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
2006                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
2007                         RETURN(0);
2008                 rc = llog_origin_handle_cancel(req);
2009                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
2010                         RETURN(0);
2011                 ldlm_callback_reply(req, rc);
2012                 RETURN(0);
2013         case LLOG_ORIGIN_HANDLE_CREATE:
2014                 req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2015                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2016                         RETURN(0);
2017                 rc = llog_origin_handle_open(req);
2018                 ldlm_callback_reply(req, rc);
2019                 RETURN(0);
2020         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2021                 req_capsule_set(&req->rq_pill,
2022                                 &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2023                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2024                         RETURN(0);
2025                 rc = llog_origin_handle_next_block(req);
2026                 ldlm_callback_reply(req, rc);
2027                 RETURN(0);
2028         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2029                 req_capsule_set(&req->rq_pill,
2030                                 &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2031                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2032                         RETURN(0);
2033                 rc = llog_origin_handle_read_header(req);
2034                 ldlm_callback_reply(req, rc);
2035                 RETURN(0);
2036         case LLOG_ORIGIN_HANDLE_CLOSE:
2037                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2038                         RETURN(0);
2039                 rc = llog_origin_handle_close(req);
2040                 ldlm_callback_reply(req, rc);
2041                 RETURN(0);
2042         default:
2043                 CERROR("unknown opcode %u\n",
2044                        lustre_msg_get_opc(req->rq_reqmsg));
2045                 ldlm_callback_reply(req, -EPROTO);
2046                 RETURN(0);
2047         }
2048
2049         ns = req->rq_export->exp_obd->obd_namespace;
2050         LASSERT(ns != NULL);
2051
2052         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2053
2054         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2055         if (dlm_req == NULL) {
2056                 rc = ldlm_callback_reply(req, -EPROTO);
2057                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2058                                      NULL);
2059                 RETURN(0);
2060         }
2061
2062         /* Force a known safe race: send a cancel to the server for a lock
2063          * on which the server has already started a blocking callback. */
2064         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2065             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2066                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
2067                 if (rc < 0)
2068                         CERROR("ldlm_cli_cancel: %d\n", rc);
2069         }
2070
2071         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2072         if (!lock) {
2073                 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
2074                        "disappeared\n", dlm_req->lock_handle[0].cookie);
2075                 rc = ldlm_callback_reply(req, -EINVAL);
2076                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2077                                      &dlm_req->lock_handle[0]);
2078                 RETURN(0);
2079         }
2080
2081         if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
2082             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2083                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2084
2085         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2086         lock_res_and_lock(lock);
2087         lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
2088         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2089                 /* If somebody cancels the lock and the cache is already
2090                  * dropped, or the lock failed before the cp_ast was received
2091                  * on the client, we can tell the server we have no lock.
2092                  * Otherwise, we should send the cancel after dropping the cache. */
2093                 if (((lock->l_flags & LDLM_FL_CANCELING) &&
2094                     (lock->l_flags & LDLM_FL_BL_DONE)) ||
2095                     (lock->l_flags & LDLM_FL_FAILED)) {
2096                         LDLM_DEBUG(lock, "callback on lock "
2097                                    LPX64" - lock disappeared\n",
2098                                    dlm_req->lock_handle[0].cookie);
2099                         unlock_res_and_lock(lock);
2100                         LDLM_LOCK_RELEASE(lock);
2101                         rc = ldlm_callback_reply(req, -EINVAL);
2102                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2103                                              &dlm_req->lock_handle[0]);
2104                         RETURN(0);
2105                 }
2106                 /* BL_AST locks are not needed in the LRU;
2107                  * let ldlm_cancel_lru() stay fast. */
2108                 ldlm_lock_remove_from_lru(lock);
2109                 lock->l_flags |= LDLM_FL_BL_AST;
2110         }
2111         unlock_res_and_lock(lock);
2112
2113         /* We want the ost thread to get this reply so that it can respond
2114          * to ost requests (write cache writeback) that might be triggered
2115          * in the callback.
2116          *
2117          * But we'd also like to be able to indicate in the reply that we're
2118          * cancelling right now, because it's unused, or have an intent result
2119          * in the reply, so we might have to push the responsibility for sending
2120          * the reply down into the AST handlers, alas. */
2121
2122         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2123         case LDLM_BL_CALLBACK:
2124                 CDEBUG(D_INODE, "blocking ast\n");
2125                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2126                 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
2127                         rc = ldlm_callback_reply(req, 0);
2128                         if (req->rq_no_reply || rc)
2129                                 ldlm_callback_errmsg(req, "Normal process", rc,
2130                                                      &dlm_req->lock_handle[0]);
2131                 }
2132                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2133                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2134                 break;
2135         case LDLM_CP_CALLBACK:
2136                 CDEBUG(D_INODE, "completion ast\n");
2137                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2138                 ldlm_callback_reply(req, 0);
2139                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2140                 break;
2141         case LDLM_GL_CALLBACK:
2142                 CDEBUG(D_INODE, "glimpse ast\n");
2143                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2144                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2145                 break;
2146         default:
2147                 LBUG();                         /* checked above */
2148         }
2149
2150         RETURN(0);
2151 }
2152
2153 #ifdef HAVE_SERVER_SUPPORT
2154 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2155 {
2156         int rc;
2157         ENTRY;
2158
2159         /* Requests arrive in sender's byte order.  The ptlrpc service
2160          * handler has already checked and, if necessary, byte-swapped the
2161          * incoming request message body, but I am responsible for the
2162          * message buffers. */
2163
2164         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2165
2166         if (req->rq_export == NULL) {
2167                 struct ldlm_request *dlm_req;
2168
2169                 CERROR("%s from %s arrived at %lu with bad export cookie "
2170                        LPU64"\n",
2171                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2172                        libcfs_nid2str(req->rq_peer.nid),
2173                        req->rq_arrival_time.tv_sec,
2174                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2175
2176                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2177                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2178                         dlm_req = req_capsule_client_get(&req->rq_pill,
2179                                                          &RMF_DLM_REQ);
2180                         if (dlm_req != NULL)
2181                                 ldlm_lock_dump_handle(D_ERROR,
2182                                                       &dlm_req->lock_handle[0]);
2183                 }
2184                 ldlm_callback_reply(req, -ENOTCONN);
2185                 RETURN(0);
2186         }
2187
2188         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2189
2190         /* XXX FIXME move this back to mds/handler.c, bug 249 */
2191         case LDLM_CANCEL:
2192                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2193                 CDEBUG(D_INODE, "cancel\n");
2194                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
2195                         RETURN(0);
2196                 rc = ldlm_handle_cancel(req);
2197                 if (rc)
2198                         break;
2199                 RETURN(0);
2200         case OBD_LOG_CANCEL:
2201                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
2202                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
2203                         RETURN(0);
2204                 rc = llog_origin_handle_cancel(req);
2205                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
2206                         RETURN(0);
2207                 ldlm_callback_reply(req, rc);
2208                 RETURN(0);
2209         default:
2210                 CERROR("invalid opcode %u\n",
2211                        lustre_msg_get_opc(req->rq_reqmsg));
2212                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2213                 ldlm_callback_reply(req, -EINVAL);
2214         }
2215
2216         RETURN(0);
2217 }
2218
2219 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2220                                         struct ldlm_lock *lock)
2221 {
2222         struct ldlm_request *dlm_req;
2223         struct lustre_handle lockh;
2224         int rc = 0;
2225         int i;
2226         ENTRY;
2227
2228         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2229         if (dlm_req == NULL)
2230                 RETURN(0);
2231
2232         ldlm_lock2handle(lock, &lockh);
2233         for (i = 0; i < dlm_req->lock_count; i++) {
2234                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2235                                         &lockh)) {
2236                         DEBUG_REQ(D_RPCTRACE, req,
2237                                   "Prio raised by lock "LPX64".", lockh.cookie);
2238
2239                         rc = 1;
2240                         break;
2241                 }
2242         }
2243
2244         RETURN(rc);
2245
2246 }
2247
2248 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2249 {
2250         struct ldlm_request *dlm_req;
2251         int rc = 0;
2252         int i;
2253         ENTRY;
2254
2255         /* no prolong in recovery */
2256         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2257                 RETURN(0);
2258
2259         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2260         if (dlm_req == NULL)
2261                 RETURN(-EFAULT);
2262
2263         for (i = 0; i < dlm_req->lock_count; i++) {
2264                 struct ldlm_lock *lock;
2265
2266                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2267                 if (lock == NULL)
2268                         continue;
2269
2270                 rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
2271                 if (rc)
2272                         LDLM_DEBUG(lock, "hpreq cancel lock");
2273                 LDLM_LOCK_PUT(lock);
2274
2275                 if (rc)
2276                         break;
2277         }
2278
2279         RETURN(rc);
2280 }
2281
2282 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2283         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2284         .hpreq_check      = ldlm_cancel_hpreq_check,
2285         .hpreq_fini       = NULL,
2286 };
2287
2288 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2289 {
2290         ENTRY;
2291
2292         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2293
2294         if (req->rq_export == NULL)
2295                 RETURN(0);
2296
2297         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2298                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2299                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2300         }
2301         RETURN(0);
2302 }
2303
2304 int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
2305                         cfs_hlist_node_t *hnode, void *data)
2306
2307 {
2308         cfs_list_t         *rpc_list = data;
2309         struct ldlm_lock   *lock = cfs_hash_object(hs, hnode);
2310
2311         lock_res_and_lock(lock);
2312
2313         if (lock->l_req_mode != lock->l_granted_mode) {
2314                 unlock_res_and_lock(lock);
2315                 return 0;
2316         }
2317
2318         LASSERT(lock->l_resource);
2319         if (lock->l_resource->lr_type != LDLM_IBITS &&
2320             lock->l_resource->lr_type != LDLM_PLAIN) {
2321                 unlock_res_and_lock(lock);
2322                 return 0;
2323         }
2324
2325         if (lock->l_flags & LDLM_FL_AST_SENT) {
2326                 unlock_res_and_lock(lock);
2327                 return 0;
2328         }
2329
2330         LASSERT(lock->l_blocking_ast);
2331         LASSERT(!lock->l_blocking_lock);
2332
2333         lock->l_flags |= LDLM_FL_AST_SENT;
2334         if (lock->l_export && lock->l_export->exp_lock_hash) {
2335                 /* NB: it's safe to call cfs_hash_del() even if the lock
2336                  * isn't in exp_lock_hash. */
2337                 /* In the function below, .hs_keycmp resolves to
2338                  * ldlm_export_lock_keycmp() */
2339                 /* coverity[overrun-buffer-val] */
2340                 cfs_hash_del(lock->l_export->exp_lock_hash,
2341                              &lock->l_remote_handle, &lock->l_exp_hash);
2342         }
2343
2344         cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
2345         LDLM_LOCK_GET(lock);
2346
2347         unlock_res_and_lock(lock);
2348         return 0;
2349 }
2350
2351 void ldlm_revoke_export_locks(struct obd_export *exp)
2352 {
2353         cfs_list_t  rpc_list;
2354         ENTRY;
2355
2356         CFS_INIT_LIST_HEAD(&rpc_list);
2357         cfs_hash_for_each_empty(exp->exp_lock_hash,
2358                                 ldlm_revoke_lock_cb, &rpc_list);
2359         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2360                           LDLM_WORK_REVOKE_AST);
2361
2362         EXIT;
2363 }
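/*
 * Illustrative note (an assumption, not code from this file): a server
 * calls this when it needs to call back every granted PLAIN/IBITS lock a
 * single client holds, e.g. from its own request handling path:
 *
 *      ldlm_revoke_export_locks(req->rq_export);
 */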
2364 #endif /* HAVE_SERVER_SUPPORT */
2365
2366 #ifdef __KERNEL__
2367 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
2368 {
2369         struct ldlm_bl_work_item *blwi = NULL;
2370         static unsigned int num_bl = 0;
2371
2372         cfs_spin_lock(&blp->blp_lock);
2373         /* take a request from blp_list at least once per blp_num_threads items */
2374         if (!cfs_list_empty(&blp->blp_list) &&
2375             (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
2376                 blwi = cfs_list_entry(blp->blp_list.next,
2377                                       struct ldlm_bl_work_item, blwi_entry);
2378         else
2379                 if (!cfs_list_empty(&blp->blp_prio_list))
2380                         blwi = cfs_list_entry(blp->blp_prio_list.next,
2381                                               struct ldlm_bl_work_item,
2382                                               blwi_entry);
2383
2384         if (blwi) {
2385                 if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
2386                         num_bl = 0;
2387                 cfs_list_del(&blwi->blwi_entry);
2388         }
2389         cfs_spin_unlock(&blp->blp_lock);
2390
2391         return blwi;
2392 }
2393
2394 /* This only contains temporary data until the thread starts */
2395 struct ldlm_bl_thread_data {
2396         char                    bltd_name[CFS_CURPROC_COMM_MAX];
2397         struct ldlm_bl_pool     *bltd_blp;
2398         cfs_completion_t        bltd_comp;
2399         int                     bltd_num;
2400 };
2401
2402 static int ldlm_bl_thread_main(void *arg);
2403
2404 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
2405 {
2406         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2407         int rc;
2408
2409         cfs_init_completion(&bltd.bltd_comp);
2410         rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
2411         if (rc < 0) {
2412                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
2413                        cfs_atomic_read(&blp->blp_num_threads), rc);
2414                 return rc;
2415         }
2416         cfs_wait_for_completion(&bltd.bltd_comp);
2417
2418         return 0;
2419 }
2420
2421 static int ldlm_bl_thread_main(void *arg)
2422 {
2423         struct ldlm_bl_pool *blp;
2424         ENTRY;
2425
2426         {
2427                 struct ldlm_bl_thread_data *bltd = arg;
2428
2429                 blp = bltd->bltd_blp;
2430
2431                 bltd->bltd_num =
2432                         cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
2433                 cfs_atomic_inc(&blp->blp_busy_threads);
2434
2435                 snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
2436                         "ldlm_bl_%02d", bltd->bltd_num);
2437                 cfs_daemonize(bltd->bltd_name);
2438
2439                 cfs_complete(&bltd->bltd_comp);
2440                 /* cannot use bltd after this; it is only on the caller's stack */
2441         }
2442
2443         while (1) {
2444                 struct l_wait_info lwi = { 0 };
2445                 struct ldlm_bl_work_item *blwi = NULL;
2446                 int busy;
2447
2448                 blwi = ldlm_bl_get_work(blp);
2449
2450                 if (blwi == NULL) {
2451                         cfs_atomic_dec(&blp->blp_busy_threads);
2452                         l_wait_event_exclusive(blp->blp_waitq,
2453                                          (blwi = ldlm_bl_get_work(blp)) != NULL,
2454                                          &lwi);
2455                         busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
2456                 } else {
2457                         busy = cfs_atomic_read(&blp->blp_busy_threads);
2458                 }
2459
2460                 if (blwi->blwi_ns == NULL)
2461                         /* added by ldlm_cleanup() */
2462                         break;
2463
2464                 /* Not fatal if this races and we get a few too many threads */
2465                 if (unlikely(busy < blp->blp_max_threads &&
2466                              busy >= cfs_atomic_read(&blp->blp_num_threads) &&
2467                              !blwi->blwi_mem_pressure))
2468                         /* discard the return value, we tried */
2469                         ldlm_bl_thread_start(blp);
2470
2471                 if (blwi->blwi_mem_pressure)
2472                         cfs_memory_pressure_set();
2473
2474                 if (blwi->blwi_count) {
2475                         int count;
2476                         /* This is the special case where we cancel locks
2477                          * in the LRU asynchronously: the list of locks is
2478                          * passed in here.  These locks are already marked
2479                          * LDLM_FL_CANCELING, but NOT yet cancelled locally. */
2480                         count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2481                                                            blwi->blwi_count,
2482                                                            LCF_BL_AST);
2483                         ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
2484                 } else {
2485                         ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2486                                                 blwi->blwi_lock);
2487                 }
2488                 if (blwi->blwi_mem_pressure)
2489                         cfs_memory_pressure_clr();
2490
2491                 if (blwi->blwi_mode == LDLM_ASYNC)
2492                         OBD_FREE(blwi, sizeof(*blwi));
2493                 else
2494                         cfs_complete(&blwi->blwi_comp);
2495         }
2496
2497         cfs_atomic_dec(&blp->blp_busy_threads);
2498         cfs_atomic_dec(&blp->blp_num_threads);
2499         cfs_complete(&blp->blp_comp);
2500         RETURN(0);
2501 }
2502
2503 #endif
2504
2505 static int ldlm_setup(void);
2506 static int ldlm_cleanup(void);
2507
2508 int ldlm_get_ref(void)
2509 {
2510         int rc = 0;
2511         ENTRY;
2512         cfs_mutex_lock(&ldlm_ref_mutex);
2513         if (++ldlm_refcount == 1) {
2514                 rc = ldlm_setup();
2515                 if (rc)
2516                         ldlm_refcount--;
2517         }
2518         cfs_mutex_unlock(&ldlm_ref_mutex);
2519
2520         RETURN(rc);
2521 }
2522
2523 void ldlm_put_ref(void)
2524 {
2525         ENTRY;
2526         cfs_mutex_lock(&ldlm_ref_mutex);
2527         if (ldlm_refcount == 1) {
2528                 int rc = ldlm_cleanup();
2529                 if (rc)
2530                         CERROR("ldlm_cleanup failed: %d\n", rc);
2531                 else
2532                         ldlm_refcount--;
2533         } else {
2534                 ldlm_refcount--;
2535         }
2536         cfs_mutex_unlock(&ldlm_ref_mutex);
2537
2538         EXIT;
2539 }
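/*
 * Illustrative sketch (an assumption, not code from this file): DLM users
 * bracket their service lifetime with these two calls, so the shared
 * ldlm_state (services, bl pool, threads) is created on first use and torn
 * down with the last user:
 *
 *      rc = ldlm_get_ref();
 *      if (rc != 0)
 *              return rc;
 *      ... use the DLM ...
 *      ldlm_put_ref();
 */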
2540
2541 /*
2542  * Export handle<->lock hash operations.
2543  */
2544 static unsigned
2545 ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
2546 {
2547         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2548 }
2549
2550 static void *
2551 ldlm_export_lock_key(cfs_hlist_node_t *hnode)
2552 {
2553         struct ldlm_lock *lock;
2554
2555         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2556         return &lock->l_remote_handle;
2557 }
2558
2559 static void
2560 ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
2561 {
2562         struct ldlm_lock     *lock;
2563
2564         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2565         lock->l_remote_handle = *(struct lustre_handle *)key;
2566 }
2567
2568 static int
2569 ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
2570 {
2571         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2572 }
2573
2574 static void *
2575 ldlm_export_lock_object(cfs_hlist_node_t *hnode)
2576 {
2577         return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2578 }
2579
2580 static void
2581 ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2582 {
2583         struct ldlm_lock *lock;
2584
2585         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2586         LDLM_LOCK_GET(lock);
2587 }
2588
2589 static void
2590 ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2591 {
2592         struct ldlm_lock *lock;
2593
2594         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2595         LDLM_LOCK_RELEASE(lock);
2596 }
2597
2598 static cfs_hash_ops_t ldlm_export_lock_ops = {
2599         .hs_hash        = ldlm_export_lock_hash,
2600         .hs_key         = ldlm_export_lock_key,
2601         .hs_keycmp      = ldlm_export_lock_keycmp,
2602         .hs_keycpy      = ldlm_export_lock_keycpy,
2603         .hs_object      = ldlm_export_lock_object,
2604         .hs_get         = ldlm_export_lock_get,
2605         .hs_put         = ldlm_export_lock_put,
2606         .hs_put_locked  = ldlm_export_lock_put,
2607 };
2608
2609 int ldlm_init_export(struct obd_export *exp)
2610 {
2611         ENTRY;
2612
2613         exp->exp_lock_hash =
2614                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
2615                                 HASH_EXP_LOCK_CUR_BITS,
2616                                 HASH_EXP_LOCK_MAX_BITS,
2617                                 HASH_EXP_LOCK_BKT_BITS, 0,
2618                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
2619                                 &ldlm_export_lock_ops,
2620                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
2621                                 CFS_HASH_NBLK_CHANGE);
2622
2623         if (!exp->exp_lock_hash)
2624                 RETURN(-ENOMEM);
2625
2626         RETURN(0);
2627 }
2628 EXPORT_SYMBOL(ldlm_init_export);
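/*
 * Illustrative sketch (an assumption, not code from this section): once the
 * export hash exists, server-side locks are indexed by the client's remote
 * handle so cancel and callback processing can find them quickly, roughly:
 *
 *      cfs_hash_add(lock->l_export->exp_lock_hash,
 *                   &lock->l_remote_handle, &lock->l_exp_hash);
 */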
2629
2630 void ldlm_destroy_export(struct obd_export *exp)
2631 {
2632         ENTRY;
2633         cfs_hash_putref(exp->exp_lock_hash);
2634         exp->exp_lock_hash = NULL;
2635
2636         ldlm_destroy_flock_export(exp);
2637         EXIT;
2638 }
2639 EXPORT_SYMBOL(ldlm_destroy_export);
2640
2641 static int ldlm_setup(void)
2642 {
2643         static struct ptlrpc_service_conf       conf;
2644         struct ldlm_bl_pool                     *blp = NULL;
2645         int rc = 0;
2646 #ifdef __KERNEL__
2647         int i;
2648 #endif
2649         ENTRY;
2650
2651         if (ldlm_state != NULL)
2652                 RETURN(-EALREADY);
2653
2654         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
2655         if (ldlm_state == NULL)
2656                 RETURN(-ENOMEM);
2657
2658 #ifdef LPROCFS
2659         rc = ldlm_proc_setup();
2660         if (rc != 0)
2661                 GOTO(out, rc);
2662 #endif
2663
2664         memset(&conf, 0, sizeof(conf));
2665         conf = (typeof(conf)) {
2666                 .psc_name               = "ldlm_cbd",
2667                 .psc_watchdog_factor    = 2,
2668                 .psc_buf                = {
2669                         .bc_nbufs               = LDLM_NBUFS,
2670                         .bc_buf_size            = LDLM_BUFSIZE,
2671                         .bc_req_max_size        = LDLM_MAXREQSIZE,
2672                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
2673                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
2674                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
2675                 },
2676                 .psc_thr                = {
2677                         .tc_thr_name            = "ldlm_cb",
2678                         .tc_thr_factor          = LDLM_THR_FACTOR,
2679                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
2680                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
2681                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
2682                         .tc_nthrs_user          = ldlm_num_threads,
2683                         .tc_cpu_affinity        = 1,
2684                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
2685                 },
2686                 .psc_cpt                = {
2687                         .cc_pattern             = ldlm_cpts,
2688                 },
2689                 .psc_ops                = {
2690                         .so_req_handler         = ldlm_callback_handler,
2691                 },
2692         };
2693         ldlm_state->ldlm_cb_service = \
2694                         ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
2695         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
2696                 CERROR("failed to start service\n");
2697                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
2698                 ldlm_state->ldlm_cb_service = NULL;
2699                 GOTO(out, rc);
2700         }
2701
2702 #ifdef HAVE_SERVER_SUPPORT
2703         memset(&conf, 0, sizeof(conf));
2704         conf = (typeof(conf)) {
2705                 .psc_name               = "ldlm_canceld",
2706                 .psc_watchdog_factor    = 6,
2707                 .psc_buf                = {
2708                         .bc_nbufs               = LDLM_NBUFS,
2709                         .bc_buf_size            = LDLM_BUFSIZE,
2710                         .bc_req_max_size        = LDLM_MAXREQSIZE,
2711                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
2712                         .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
2713                         .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
2714
2715                 },
2716                 .psc_thr                = {
2717                         .tc_thr_name            = "ldlm_cn",
2718                         .tc_thr_factor          = LDLM_THR_FACTOR,
2719                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
2720                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
2721                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
2722                         .tc_nthrs_user          = ldlm_num_threads,
2723                         .tc_cpu_affinity        = 1,
2724                         .tc_ctx_tags            = LCT_MD_THREAD |
2725                                                   LCT_DT_THREAD |
2726                                                   LCT_CL_THREAD,
2727                 },
2728                 .psc_cpt                = {
2729                         .cc_pattern             = ldlm_cpts,
2730                 },
2731                 .psc_ops                = {
2732                         .so_req_handler         = ldlm_cancel_handler,
2733                         .so_hpreq_handler       = ldlm_hpreq_handler,
2734                 },
2735         };
2736         ldlm_state->ldlm_cancel_service =
2737                         ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
2738         if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
2739                 CERROR("failed to start service\n");
2740                 rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
2741                 ldlm_state->ldlm_cancel_service = NULL;
2742                 GOTO(out, rc);
2743         }
2744 #endif
2745
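        /*
         * Set up the blocking-callback ("ldlm_bl") thread pool that drains
         * work items queued on blp_list and blp_prio_list.
         */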
2746         OBD_ALLOC(blp, sizeof(*blp));
2747         if (blp == NULL)
2748                 GOTO(out, rc = -ENOMEM);
2749         ldlm_state->ldlm_bl_pool = blp;
2750
2751         cfs_spin_lock_init(&blp->blp_lock);
2752         CFS_INIT_LIST_HEAD(&blp->blp_list);
2753         CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
2754         cfs_waitq_init(&blp->blp_waitq);
2755         cfs_atomic_set(&blp->blp_num_threads, 0);
2756         cfs_atomic_set(&blp->blp_busy_threads, 0);
2757
2758 #ifdef __KERNEL__
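        /*
         * With ldlm_num_threads unset, start LDLM_NTHRS_INIT threads and let
         * the pool grow up to LDLM_NTHRS_MAX; otherwise run a fixed number of
         * threads clamped to that same range.
         */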
2759         if (ldlm_num_threads == 0) {
2760                 blp->blp_min_threads = LDLM_NTHRS_INIT;
2761                 blp->blp_max_threads = LDLM_NTHRS_MAX;
2762         } else {
2763                 blp->blp_min_threads = blp->blp_max_threads =
2764                         min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
2765                                                          ldlm_num_threads));
2766         }
2767
2768         for (i = 0; i < blp->blp_min_threads; i++) {
2769                 rc = ldlm_bl_thread_start(blp);
2770                 if (rc < 0)
2771                         GOTO(out, rc);
2772         }
2773
2774 # ifdef HAVE_SERVER_SUPPORT
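        /*
         * Server side: initialise the waiting-locks list/timer and start the
         * expired-lock thread, which deals with locks whose callbacks have
         * timed out (typically by evicting the offending client).
         */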
2775         CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
2776         expired_lock_thread.elt_state = ELT_STOPPED;
2777         cfs_waitq_init(&expired_lock_thread.elt_waitq);
2778
2779         CFS_INIT_LIST_HEAD(&waiting_locks_list);
2780         cfs_spin_lock_init(&waiting_locks_spinlock);
2781         cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
2782
2783         rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
2784         if (rc < 0) {
2785                 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
2786                 GOTO(out, rc);
2787         }
2788
2789         cfs_wait_event(expired_lock_thread.elt_waitq,
2790                        expired_lock_thread.elt_state == ELT_READY);
2791 # endif /* HAVE_SERVER_SUPPORT */
2792
2793         rc = ldlm_pools_init();
2794         if (rc) {
2795                 CERROR("Failed to initialize LDLM pools: %d\n", rc);
2796                 GOTO(out, rc);
2797         }
2798 #endif
2799         RETURN(0);
2800
2801  out:
2802         ldlm_cleanup();
2803         RETURN(rc);
2804 }
2805
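/*
 * Undo ldlm_setup().  Refuses to run while any namespace still exists;
 * otherwise stops the blocking-callback threads, unregisters the PTLRPC
 * services, shuts down the expired-lock thread and frees ldlm_state.
 */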
2806 static int ldlm_cleanup(void)
2807 {
2808         ENTRY;
2809
2810         if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
2811             !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
2812                 CERROR("ldlm still has namespaces; clean these up first.\n");
2813                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
2814                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
2815                 RETURN(-EBUSY);
2816         }
2817
2818 #ifdef __KERNEL__
2819         ldlm_pools_fini();
2820
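        /*
         * Stop the blocking-callback threads one at a time: a dummy work
         * item with blwi_ns == NULL tells a thread to exit, and blp_comp is
         * waited on until it has done so.
         */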
2821         if (ldlm_state->ldlm_bl_pool != NULL) {
2822                 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
2823
2824                 while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
2825                         struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
2826
2827                         cfs_init_completion(&blp->blp_comp);
2828
2829                         cfs_spin_lock(&blp->blp_lock);
2830                         cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
2831                         cfs_waitq_signal(&blp->blp_waitq);
2832                         cfs_spin_unlock(&blp->blp_lock);
2833
2834                         cfs_wait_for_completion(&blp->blp_comp);
2835                 }
2836
2837                 OBD_FREE(blp, sizeof(*blp));
2838         }
2839 #endif /* __KERNEL__ */
2840
2841         if (ldlm_state->ldlm_cb_service != NULL)
2842                 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2843 #ifdef HAVE_SERVER_SUPPORT
2844         if (ldlm_state->ldlm_cancel_service != NULL)
2845                 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2846 #endif
2847
2848 #ifdef __KERNEL__
2849         ldlm_proc_cleanup();
2850
2851 # ifdef HAVE_SERVER_SUPPORT
2852         if (expired_lock_thread.elt_state != ELT_STOPPED) {
2853                 expired_lock_thread.elt_state = ELT_TERMINATE;
2854                 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
2855                 cfs_wait_event(expired_lock_thread.elt_waitq,
2856                                expired_lock_thread.elt_state == ELT_STOPPED);
2857         }
2858 # endif
2859 #endif /* __KERNEL__ */
2860
2861         OBD_FREE(ldlm_state, sizeof(*ldlm_state));
2862         ldlm_state = NULL;
2863
2864         RETURN(0);
2865 }
2866
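/*
 * One-time module initialisation: set up the global mutexes and the slab
 * caches for resources, locks and interval nodes.
 */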
2867 int ldlm_init(void)
2868 {
2869         cfs_mutex_init(&ldlm_ref_mutex);
2870         cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
2871         cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
2872         ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
2873                                                sizeof(struct ldlm_resource), 0,
2874                                                CFS_SLAB_HWCACHE_ALIGN);
2875         if (ldlm_resource_slab == NULL)
2876                 return -ENOMEM;
2877
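        /* SLAB_DESTROY_BY_RCU defers returning lock slab pages to the kernel
         * until an RCU grace period has passed; ldlm_exit() pairs this with
         * synchronize_rcu() before destroying the cache. */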
2878         ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
2879                               sizeof(struct ldlm_lock), 0,
2880                               CFS_SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
2881         if (ldlm_lock_slab == NULL) {
2882                 cfs_mem_cache_destroy(ldlm_resource_slab);
2883                 return -ENOMEM;
2884         }
2885
2886         ldlm_interval_slab = cfs_mem_cache_create("interval_node",
2887                                         sizeof(struct ldlm_interval),
2888                                         0, CFS_SLAB_HWCACHE_ALIGN);
2889         if (ldlm_interval_slab == NULL) {
2890                 cfs_mem_cache_destroy(ldlm_resource_slab);
2891                 cfs_mem_cache_destroy(ldlm_lock_slab);
2892                 return -ENOMEM;
2893         }
2894 #if LUSTRE_TRACKS_LOCK_EXP_REFS
2895         class_export_dump_hook = ldlm_dump_export_locks;
2896 #endif
2897         return 0;
2898 }
2899
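/*
 * Module cleanup: destroy the slab caches created in ldlm_init().  A
 * non-zero ldlm_refcount at this point indicates a get/put imbalance and
 * is only reported.
 */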
2900 void ldlm_exit(void)
2901 {
2902         int rc;
2903         if (ldlm_refcount)
2904                 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
2905         rc = cfs_mem_cache_destroy(ldlm_resource_slab);
2906         LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
2907 #ifdef __KERNEL__
2908         /* ldlm_lock_put() frees locks via RCU (ldlm_lock_free()), so call
2909          * synchronize_rcu() to wait for a grace period to elapse and give
2910          * ldlm_lock_free() a chance to run before the slab is destroyed. */
2911         synchronize_rcu();
2912 #endif
2913         rc = cfs_mem_cache_destroy(ldlm_lock_slab);
2914         LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
2915         rc = cfs_mem_cache_destroy(ldlm_interval_slab);
2916         LASSERTF(rc == 0, "couldn't free interval node slab\n");
2917 }
2918
2919 /* ldlm_extent.c */
2920 EXPORT_SYMBOL(ldlm_extent_shift_kms);
2921
2922 /* ldlm_lock.c */
2923 #ifdef HAVE_SERVER_SUPPORT
2924 EXPORT_SYMBOL(ldlm_get_processing_policy);
2925 #endif
2926 EXPORT_SYMBOL(ldlm_lock2desc);
2927 EXPORT_SYMBOL(ldlm_register_intent);
2928 EXPORT_SYMBOL(ldlm_lockname);
2929 EXPORT_SYMBOL(ldlm_typename);
2930 EXPORT_SYMBOL(ldlm_lock2handle);
2931 EXPORT_SYMBOL(__ldlm_handle2lock);
2932 EXPORT_SYMBOL(ldlm_lock_get);
2933 EXPORT_SYMBOL(ldlm_lock_put);
2934 EXPORT_SYMBOL(ldlm_lock_match);
2935 EXPORT_SYMBOL(ldlm_lock_cancel);
2936 EXPORT_SYMBOL(ldlm_lock_addref);
2937 EXPORT_SYMBOL(ldlm_lock_addref_try);
2938 EXPORT_SYMBOL(ldlm_lock_decref);
2939 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
2940 EXPORT_SYMBOL(ldlm_lock_change_resource);
2941 EXPORT_SYMBOL(ldlm_it2str);
2942 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2943 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
2944 EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
2945 EXPORT_SYMBOL(ldlm_lock_allow_match);
2946 EXPORT_SYMBOL(ldlm_lock_downgrade);
2947 EXPORT_SYMBOL(ldlm_lock_convert);
2948
2949 /* ldlm_request.c */
2950 EXPORT_SYMBOL(ldlm_completion_ast_async);
2951 EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
2952 EXPORT_SYMBOL(ldlm_completion_ast);
2953 EXPORT_SYMBOL(ldlm_blocking_ast);
2954 EXPORT_SYMBOL(ldlm_glimpse_ast);
2955 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2956 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2957 EXPORT_SYMBOL(ldlm_prep_elc_req);
2958 EXPORT_SYMBOL(ldlm_cli_convert);
2959 EXPORT_SYMBOL(ldlm_cli_enqueue);
2960 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2961 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2962 EXPORT_SYMBOL(ldlm_cli_cancel);
2963 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2964 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2965 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2966 EXPORT_SYMBOL(ldlm_replay_locks);
2967 EXPORT_SYMBOL(ldlm_resource_foreach);
2968 EXPORT_SYMBOL(ldlm_namespace_foreach);
2969 EXPORT_SYMBOL(ldlm_resource_iterate);
2970 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2971 EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
2972 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2973
2974 /* ldlm_lockd.c */
2975 #ifdef HAVE_SERVER_SUPPORT
2976 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2977 EXPORT_SYMBOL(ldlm_server_completion_ast);
2978 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2979 EXPORT_SYMBOL(ldlm_glimpse_locks);
2980 EXPORT_SYMBOL(ldlm_handle_enqueue);
2981 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2982 EXPORT_SYMBOL(ldlm_handle_cancel);
2983 EXPORT_SYMBOL(ldlm_request_cancel);
2984 EXPORT_SYMBOL(ldlm_handle_convert);
2985 EXPORT_SYMBOL(ldlm_handle_convert0);
2986 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2987 #endif
2988 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2989 EXPORT_SYMBOL(ldlm_get_ref);
2990 EXPORT_SYMBOL(ldlm_put_ref);
2991 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2992
2993 /* ldlm_resource.c */
2994 EXPORT_SYMBOL(ldlm_namespace_new);
2995 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2996 EXPORT_SYMBOL(ldlm_namespace_free);
2997 EXPORT_SYMBOL(ldlm_namespace_dump);
2998 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2999 EXPORT_SYMBOL(ldlm_resource_get);
3000 EXPORT_SYMBOL(ldlm_resource_putref);
3001 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
3002
3003 /* ldlm_lib.c */
3004 EXPORT_SYMBOL(client_import_add_conn);
3005 EXPORT_SYMBOL(client_import_del_conn);
3006 EXPORT_SYMBOL(client_obd_setup);
3007 EXPORT_SYMBOL(client_obd_cleanup);
3008 EXPORT_SYMBOL(client_connect_import);
3009 EXPORT_SYMBOL(client_disconnect_export);
3010 EXPORT_SYMBOL(target_send_reply);
3011 EXPORT_SYMBOL(target_pack_pool_reply);
3012
3013 #ifdef HAVE_SERVER_SUPPORT
3014 EXPORT_SYMBOL(server_disconnect_export);
3015 EXPORT_SYMBOL(target_stop_recovery_thread);
3016 EXPORT_SYMBOL(target_handle_connect);
3017 EXPORT_SYMBOL(target_cleanup_recovery);
3018 EXPORT_SYMBOL(target_destroy_export);
3019 EXPORT_SYMBOL(target_cancel_recovery_timer);
3020 EXPORT_SYMBOL(target_queue_recovery_request);
3021 EXPORT_SYMBOL(target_handle_ping);
3022 EXPORT_SYMBOL(target_handle_disconnect);
3023 #endif
3024
3025 /* l_lock.c */
3026 EXPORT_SYMBOL(lock_res_and_lock);
3027 EXPORT_SYMBOL(unlock_res_and_lock);