/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lockd.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_class.h>
#include <libcfs/list.h>
#include "ldlm_internal.h"

static int ldlm_num_threads;
CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
                "number of DLM service threads to start");

static char *ldlm_cpts;
CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
                "CPU partitions ldlm threads should run on");

extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
static cfs_mutex_t      ldlm_ref_mutex;
static int ldlm_refcount;

struct ldlm_cb_async_args {
        struct ldlm_cb_set_arg *ca_set_arg;
        struct ldlm_lock       *ca_lock;
};

/* LDLM state */

static struct ldlm_state *ldlm_state;

inline cfs_time_t round_timeout(cfs_time_t timeout)
{
        return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}

/* timeout for initial callback (AST) reply (bz10399) */
static inline unsigned int ldlm_get_rq_timeout(void)
{
        /* Non-AT value */
        unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);

        return timeout < 1 ? 1 : timeout;
}
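
/*
 * Worked example (illustrative only, assuming the common defaults of
 * obd_timeout = 100 s and ldlm_timeout = 20 s): ldlm_get_rq_timeout()
 * returns min(20, 100 / 3) = 20 seconds; the clamp to a minimum of 1
 * only matters for pathologically small tunables.  This value is used
 * as rq_timeout for AST RPCs when adaptive timeouts (AT) are off.
 */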

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

struct ldlm_bl_pool {
        cfs_spinlock_t          blp_lock;

        /*
         * blp_prio_list is used for callbacks that should be handled
         * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
         * see bug 13843
         */
        cfs_list_t              blp_prio_list;

        /*
         * blp_list is used for all other callbacks which are likely
         * to take longer to process.
         */
        cfs_list_t              blp_list;

        cfs_waitq_t             blp_waitq;
        cfs_completion_t        blp_comp;
        cfs_atomic_t            blp_num_threads;
        cfs_atomic_t            blp_busy_threads;
        int                     blp_min_threads;
        int                     blp_max_threads;
};

struct ldlm_bl_work_item {
        cfs_list_t              blwi_entry;
        struct ldlm_namespace  *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock       *blwi_lock;
        cfs_list_t              blwi_head;
        int                     blwi_count;
        cfs_completion_t        blwi_comp;
        int                     blwi_mode;
        int                     blwi_mem_pressure;
};

#if defined(HAVE_SERVER_SUPPORT) && defined(__KERNEL__)

/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
static cfs_list_t waiting_locks_list;
static cfs_timer_t waiting_locks_timer;

static struct expired_lock_thread {
        cfs_waitq_t             elt_waitq;
        int                     elt_state;
        int                     elt_dump;
        cfs_list_t              elt_expired_locks;
} expired_lock_thread;

static inline int have_expired_locks(void)
{
        int need_to_run;

        ENTRY;
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        RETURN(need_to_run);
}

static int expired_lock_main(void *arg)
{
        cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        int do_dump;

        ENTRY;
        cfs_daemonize("ldlm_elt");

        expired_lock_thread.elt_state = ELT_READY;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                cfs_spin_lock_bh(&waiting_locks_spinlock);
                if (expired_lock_thread.elt_dump) {
                        struct libcfs_debug_msg_data msgdata = {
                                .msg_file = __FILE__,
                                .msg_fn = "waiting_locks_callback",
                                .msg_line = expired_lock_thread.elt_dump };
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        /* from waiting_locks_callback, but not in timer */
                        libcfs_debug_dumplog();
                        libcfs_run_lbug_upcall(&msgdata);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                        expired_lock_thread.elt_dump = 0;
                }

                do_dump = 0;

                while (!cfs_list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = cfs_list_entry(expired->next, struct ldlm_lock,
                                              l_pending_chain);
                        if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                                CERROR("free lock on elt list %p\n", lock);
                                LBUG();
                        }
                        cfs_list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                /* release extra ref grabbed by
                                 * ldlm_add_waiting_lock() or
                                 * ldlm_failed_ast() */
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }

                        if (lock->l_destroyed) {
                                /* release the lock refcount that
                                 * waiting_locks_callback() handed over */
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }
                        export = class_export_lock_get(lock->l_export, lock);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        do_dump++;
                        class_fail_export(export);
                        class_export_lock_put(export, lock);

                        /* release extra ref grabbed by ldlm_add_waiting_lock()
                         * or ldlm_failed_ast() */
                        LDLM_LOCK_RELEASE(lock);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                }
                cfs_spin_unlock_bh(&waiting_locks_spinlock);

                if (do_dump && obd_dump_on_eviction) {
                        CERROR("dump the log upon eviction\n");
                        libcfs_debug_dumplog();
                }

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        RETURN(0);
}
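
/*
 * Lifecycle sketch (a summary, assuming the usual LDLM setup/cleanup paths):
 * the thread is spawned at LDLM startup and signals ELT_READY once running;
 * waiting_locks_callback() feeds it expired locks and wakes elt_waitq; at
 * shutdown the state is set to ELT_TERMINATE and the waiter blocks until the
 * thread acknowledges with ELT_STOPPED.
 */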

static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds);

/**
 * Check whether there is a request in the export request list
 * that prevents the lock from being cancelled.
 */
static int ldlm_lock_busy(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        int match = 0;
        ENTRY;

        if (lock->l_export == NULL)
                return 0;

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                if (req->rq_ops->hpreq_lock_match) {
                        match = req->rq_ops->hpreq_lock_match(req, lock);
                        if (match)
                                break;
                }
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        RETURN(match);
}
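
/*
 * Example (illustrative, based on how hpreq_lock_match is used elsewhere in
 * Lustre): a client doing bulk I/O under an extent lock has a high-priority
 * read/write RPC on exp_hp_rpcs; hpreq_lock_match() reports that the RPC
 * covers this lock, so the callback timer is prolonged instead of evicting a
 * client that is still actively using the lock.
 */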

/* This is called from within a timer interrupt and cannot schedule */
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock        *lock;
        int                     need_dump = 0;

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        while (!cfs_list_empty(&waiting_locks_list)) {
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                if (cfs_time_after(lock->l_callback_timeout,
                                   cfs_time_current()) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                if (ptlrpc_check_suspend()) {
                        /* there is a case when we hold a lock from one MDS
                         * while talking to another MDS; we can easily get
                         * here if the second MDS is being recovered, so
                         * suspend timeouts. bug 6019 */

                        LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        if (lock->l_destroyed) {
                                /* relay the lock refcount decrease to
                                 * the expired lock thread */
                                cfs_list_add(&lock->l_pending_chain,
                                        &expired_lock_thread.elt_expired_locks);
                        } else {
                                __ldlm_add_waiting_lock(lock,
                                                ldlm_get_enq_timeout(lock));
                        }
                        continue;
                }

                /* if the timeout overlaps the activation time of suspended
                 * timeouts, extend it to give the client a chance to
                 * reconnect */
                if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
                                                 cfs_time_seconds(obd_timeout)/2),
                                    ptlrpc_suspend_wakeup_time())) {
                        LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        if (lock->l_destroyed) {
                                /* relay the lock refcount decrease to
                                 * the expired lock thread */
                                cfs_list_add(&lock->l_pending_chain,
                                        &expired_lock_thread.elt_expired_locks);
                        } else {
                                __ldlm_add_waiting_lock(lock,
                                                ldlm_get_enq_timeout(lock));
                        }
                        continue;
                }

                /* Check if we need to prolong the timeout */
                if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
                    ldlm_lock_busy(lock)) {
                        int cont = 1;

                        if (lock->l_pending_chain.next == &waiting_locks_list)
                                cont = 0;

                        LDLM_LOCK_GET(lock);

                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        LDLM_DEBUG(lock, "prolong the busy lock");
                        ldlm_refresh_waiting_lock(lock,
                                                  ldlm_get_enq_timeout(lock));
                        cfs_spin_lock_bh(&waiting_locks_spinlock);

                        if (!cont) {
                                LDLM_LOCK_RELEASE(lock);
                                break;
                        }

                        LDLM_LOCK_RELEASE(lock);
                        continue;
                }
                ldlm_lock_to_ns(lock)->ns_timeouts++;
                LDLM_ERROR(lock, "lock callback timer expired after %lds: "
                           "evicting client at %s ",
                           cfs_time_current_sec() - lock->l_last_activity,
                           libcfs_nid2str(
                                   lock->l_export->exp_connection->c_peer.nid));

                /* no need to take an extra ref on the lock, since it was on
                 * the waiting_locks_list and ldlm_add_waiting_lock()
                 * already grabbed a ref */
                cfs_list_del(&lock->l_pending_chain);
                cfs_list_add(&lock->l_pending_chain,
                             &expired_lock_thread.elt_expired_locks);
                need_dump = 1;
        }

        if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
                if (obd_dump_on_timeout && need_dump)
                        expired_lock_thread.elt_dump = __LINE__;

                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!cfs_list_empty(&waiting_locks_list)) {
                cfs_time_t timeout_rounded;
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
}
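
/*
 * Timeline sketch (illustrative): a blocking AST is sent at t=0 and the lock
 * is added to waiting_locks_list with, say, a 30 s enqueue timeout.  If the
 * client cancels by t=30 the lock is simply removed; if not, the timer fires,
 * waiting_locks_callback() moves the lock to elt_expired_locks, and
 * expired_lock_main() evicts the export -- unless the lock is still "busy"
 * per ldlm_lock_busy(), in which case the deadline is pushed back instead.
 */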

/*
 * Indicate that we're waiting for a client to call us back to cancel a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 * As in ldlm_add_waiting_lock(), the caller must take a lock reference
 * if the lock has been added to the waiting list (1 is returned).
 *
 * Called with the namespace lock held.
 */
static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
{
        cfs_time_t timeout;
        cfs_time_t timeout_rounded;

        if (!cfs_list_empty(&lock->l_pending_chain))
                return 0;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
            OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
                seconds = 1;

        timeout = cfs_time_shift(seconds);
        if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
                lock->l_callback_timeout = timeout;

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (cfs_time_before(timeout_rounded,
                            cfs_timer_deadline(&waiting_locks_timer)) ||
            !cfs_timer_is_armed(&waiting_locks_timer)) {
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        /* if the new lock has a shorter timeout than something earlier on
           the list, we'll wait the longer amount of time; no big deal. */
        /* FIFO */
        cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
        return 1;
}
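
/*
 * Rounding example (illustrative): round_timeout() truncates the deadline to
 * whole seconds and adds one, so a callback timeout of 12.3 s becomes a timer
 * deadline of 13 s.  Batching deadlines on second boundaries this way lets
 * one timer firing expire several locks at once instead of firing per lock.
 */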

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        int ret;
        int timeout = ldlm_get_enq_timeout(lock);

        /* NB: must be called while holding lock_res_and_lock() */
        LASSERT(lock->l_res_locked);
        lock->l_waited = 1;

        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (lock->l_destroyed) {
                static cfs_time_t next;
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
                if (cfs_time_after(cfs_time_current(), next)) {
                        next = cfs_time_shift(14400);
                        libcfs_debug_dumpstack(NULL);
                }
                return 0;
        }

        ret = __ldlm_add_waiting_lock(lock, timeout);
        if (ret) {
                /* grab a ref on the lock if it has been added to the
                 * waiting list */
                LDLM_LOCK_GET(lock);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        if (ret) {
                cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
                if (cfs_list_empty(&lock->l_exp_list))
                        cfs_list_add(&lock->l_exp_list,
                                     &lock->l_export->exp_bl_list);
                cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
        }

        LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
                   ret == 0 ? "not re-" : "", timeout,
                   AT_OFF ? "off" : "on");
        return ret;
}

/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 * As in ldlm_del_waiting_lock(), the caller must release the lock
 * reference when the lock is removed from any list (1 is returned).
 *
 * Called with namespace lock held.
 */
static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        cfs_list_t *list_next;

        if (cfs_list_empty(&lock->l_pending_chain))
                return 0;

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        cfs_timer_disarm(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = cfs_list_entry(list_next, struct ldlm_lock,
                                              l_pending_chain);
                        cfs_timer_arm(&waiting_locks_timer,
                                      round_timeout(next->l_callback_timeout));
                }
        }
        cfs_list_del_init(&lock->l_pending_chain);

        return 1;
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        int ret;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        ret = __ldlm_del_waiting_lock(lock);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        /* remove the lock from the export blocking list */
        cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
        cfs_list_del_init(&lock->l_exp_list);
        cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);

        if (ret) {
                /* release the lock ref if it has indeed been removed
                 * from a list */
                LDLM_LOCK_RELEASE(lock);
        }

        LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
        return ret;
}

/*
 * Prolong the lock callback timeout.
 *
 * Called with namespace lock held.
 */
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);

        if (cfs_list_empty(&lock->l_pending_chain)) {
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        /* we remove/add the lock to the waiting list, so there is no need
         * to release/take a lock reference */
        __ldlm_del_waiting_lock(lock);
        __ldlm_add_waiting_lock(lock, timeout);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        LDLM_DEBUG(lock, "refreshed");
        return 1;
}
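
/*
 * Reference-counting invariant for the waiting list (a summary of the code
 * above): ldlm_add_waiting_lock() takes one LDLM_LOCK_GET() when the lock
 * enters the list, ldlm_del_waiting_lock() drops it on removal, and
 * ldlm_refresh_waiting_lock() is reference-neutral because it removes and
 * re-adds the lock under a single hold of waiting_locks_spinlock.
 */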

#else /* !HAVE_SERVER_SUPPORT || !__KERNEL__ */

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        RETURN(0);
}

# ifdef HAVE_SERVER_SUPPORT
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        LASSERT(lock->l_res_locked);
        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
        RETURN(1);
}

# endif
#endif /* HAVE_SERVER_SUPPORT && __KERNEL__ */

#ifdef HAVE_SERVER_SUPPORT

static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                            const char *ast_type)
{
        LCONSOLE_ERROR_MSG(0x138, "%s: A client on nid %s was evicted due "
                           "to a lock %s callback time out: rc %d\n",
                           lock->l_export->exp_obd->obd_name,
                           obd_export_nid2str(lock->l_export), ast_type, rc);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();
#ifdef __KERNEL__
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (__ldlm_del_waiting_lock(lock) == 0)
                /* the lock was not in any list, grab an extra ref before adding
                 * the lock to the expired list */
                LDLM_LOCK_GET(lock);
        cfs_list_add(&lock->l_pending_chain,
                     &expired_lock_thread.elt_expired_locks);
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
#else
        class_fail_export(lock->l_export);
#endif
}

static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else if (lock->l_flags & LDLM_FL_CANCEL) {
                        LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
                                   "cancel was received (AST reply lost?)",
                                   ast_type, libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        ldlm_del_waiting_lock(lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL) {
                        struct ldlm_resource *res = lock->l_resource;
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                               " from %s AST - normal race",
                               libcfs_nid2str(peer.nid),
                               req->rq_repmsg ?
                               lustre_msg_get_status(req->rq_repmsg) : -1,
                               ast_type);
                        if (res) {
                                /* update the lvbo to return proper attributes.
                                 * see bug 23174 */
                                ldlm_resource_getref(res);
                                ldlm_res_lvbo_update(res, NULL, 1);
                                ldlm_resource_putref(res);
                        }

                } else {
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", libcfs_nid2str(peer.nid),
                                   (req->rq_repmsg != NULL) ?
                                   lustre_msg_get_status(req->rq_repmsg) : 0,
                                   ast_type);
                }
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}
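
/*
 * Disposition summary for ldlm_handle_ast_error() (derived from the code
 * above):
 *
 *   -ETIMEDOUT/-EINTR/-ENOTCONN  evict the client via ldlm_failed_ast(),
 *                                unless it is a liblustre client or a cancel
 *                                raced with the AST, in which case the lock
 *                                is just cancelled and -ERESTART returned;
 *   -EINVAL                      normal race (client already dropped the
 *                                lock), refresh the LVB, cancel, -ERESTART;
 *   other non-zero rc            log an error, cancel the lock, -ERESTART.
 */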

static int ldlm_cb_interpret(const struct lu_env *env,
                             struct ptlrpc_request *req, void *data, int rc)
{
        struct ldlm_cb_async_args *ca   = data;
        struct ldlm_lock          *lock = ca->ca_lock;
        struct ldlm_cb_set_arg    *arg  = ca->ca_set_arg;
        ENTRY;

        LASSERT(lock != NULL);

        switch (arg->type) {
        case LDLM_GL_CALLBACK:
                /* Update the LVB from disk if the AST failed
                 * (this is a legal race)
                 *
                 * - Glimpse callback of local lock just returns
                 *   -ELDLM_NO_LOCK_DATA.
                 * - Glimpse callback of remote lock might return
                 *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
                 */
                if (rc == -ELDLM_NO_LOCK_DATA) {
                        LDLM_DEBUG(lock, "lost race - client has a lock but no "
                                   "inode");
                        ldlm_res_lvbo_update(lock->l_resource, NULL, 1);
                } else if (rc != 0) {
                        rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
                } else {
                        rc = ldlm_res_lvbo_update(lock->l_resource, req, 1);
                }
                break;
        case LDLM_BL_CALLBACK:
                if (rc != 0)
                        rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
                break;
        case LDLM_CP_CALLBACK:
                if (rc != 0)
                        rc = ldlm_handle_ast_error(lock, req, rc, "completion");
                break;
        default:
                LDLM_ERROR(lock, "invalid opcode for lock callback %d",
                           arg->type);
                LBUG();
        }

        /* release extra reference taken in ldlm_ast_fini() */
        LDLM_LOCK_RELEASE(lock);

        if (rc == -ERESTART)
                cfs_atomic_inc(&arg->restart);

        RETURN(0);
}

static inline int ldlm_ast_fini(struct ptlrpc_request *req,
                                struct ldlm_cb_set_arg *arg,
                                struct ldlm_lock *lock,
                                int instant_cancel)
{
        int rc = 0;
        ENTRY;

        if (unlikely(instant_cancel)) {
                rc = ptl_send_rpc(req, 1);
                ptlrpc_req_finished(req);
                if (rc == 0)
                        cfs_atomic_inc(&arg->restart);
        } else {
                LDLM_LOCK_GET(lock);
                ptlrpc_set_add_req(arg->set, req);
        }

        RETURN(rc);
}
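
/*
 * Note on the two paths above (a reading of the code, not new behaviour): in
 * the instant-cancel case the AST is fired as a one-way RPC (ptl_send_rpc()
 * with noreply set) because the lock is already cancelled and no answer is
 * expected, so no extra lock reference is needed; otherwise the request joins
 * arg->set and LDLM_LOCK_GET() pins the lock until ldlm_cb_interpret() runs.
 */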

/**
 * Check if there are requests in the export request list which prevent
 * the lock from being cancelled, and make those requests high priority ones.
 */
static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        ENTRY;

        if (lock->l_export == NULL) {
                LDLM_DEBUG(lock, "client lock: no-op");
                RETURN_EXIT;
        }

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                /* Do not process requests that have not yet been added to
                 * the incoming queue or have already been removed from it
                 * for processing */
                if (!req->rq_hp && !cfs_list_empty(&req->rq_list) &&
                    req->rq_ops->hpreq_lock_match &&
                    req->rq_ops->hpreq_lock_match(req, lock))
                        ptlrpc_hpreq_reorder(req);
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        EXIT;
}

/*
 * ->l_blocking_ast() method for server-side locks. This is invoked when a
 * newly enqueued server lock conflicts with the given one.
 *
 * Sends a blocking AST RPC to the client owning that lock; arms the timeout
 * timer to wait for the client's response.
 */
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_cb_async_args *ca;
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        int                     instant_cancel = 0;
        int                     rc = 0;
        ENTRY;

        if (flag == LDLM_CB_CANCELING)
                /* Don't need to do anything here. */
                RETURN(0);

        LASSERT(lock);
        LASSERT(data != NULL);
        if (lock->l_export->exp_obd->obd_recovering != 0)
                LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");

        ldlm_lock_reorder_req(lock);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_BL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;

        lock_res_and_lock(lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                unlock_res_and_lock(lock);

                ptlrpc_req_finished(req);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                unlock_res_and_lock(lock);
                ptlrpc_req_finished(req);
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
                instant_cancel = 1;

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_desc = *desc;
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");

        ptlrpc_request_set_replen(req);
        if (instant_cancel) {
                unlock_res_and_lock(lock);
                ldlm_lock_cancel(lock);
        } else {
                LASSERT(lock->l_granted_mode == lock->l_req_mode);
                ldlm_add_waiting_lock(lock);
                unlock_res_and_lock(lock);
        }

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_BL_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_ast_fini(req, arg, lock, instant_cancel);

        RETURN(rc);
}
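
/*
 * Flow sketch for a granted, conflicting lock (a summary of the function
 * above): the AST request is packed on the reverse import, the lock is put on
 * the waiting list (arming the eviction timer), and the RPC is queued on the
 * caller's set; ldlm_cb_interpret() later feeds any error to
 * ldlm_handle_ast_error().  With LDLM_FL_CANCEL_ON_BLOCK the lock is instead
 * cancelled locally and the AST is sent as a one-way notification.
 */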

int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        struct ldlm_cb_async_args *ca;
        long                    total_enqueue_wait;
        int                     instant_cancel = 0;
        int                     rc = 0;
        ENTRY;

        LASSERT(lock != NULL);
        LASSERT(data != NULL);

        total_enqueue_wait = cfs_time_sub(cfs_time_current_sec(),
                                          lock->l_last_activity);

        req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
                                    &RQF_LDLM_CP_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* server namespace, doesn't need lock */
        if (lock->l_resource->lr_lvb_len) {
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
                                     lock->l_resource->lr_lvb_len);
        }

        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);

        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);
        if (lock->l_resource->lr_lvb_len) {
                void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);

                lock_res(lock->l_resource);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
                unlock_res(lock->l_resource);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
                   total_enqueue_wait);

        /* Server-side enqueue wait time estimate, used in
           __ldlm_add_waiting_lock to set future enqueue timers */
        if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
                at_measured(ldlm_lock_to_ns_at(lock),
                            total_enqueue_wait);
        else
                /* bz18618. Don't add the lock enqueue time we spent waiting
                   for a previous callback to fail. Locks waiting legitimately
                   will get extended by ldlm_refresh_waiting_lock regardless
                   of the estimate, so it's okay to underestimate here. */
                LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
                       "It is likely that a previous callback timed out.",
                       total_enqueue_wait,
                       at_get(ldlm_lock_to_ns_at(lock)));

        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        /* We only send real blocking ASTs after the lock is granted */
        lock_res_and_lock(lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                /* copy AST flags like LDLM_FL_DISCARD_DATA */
                body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

                /* We might get here prior to ldlm_handle_enqueue setting the
                 * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
                 * into the waiting list, but this is safe; similar code in
                 * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
                 * which not only cancels the lock but also removes it from
                 * the waiting list */
                if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                        unlock_res_and_lock(lock);
                        ldlm_lock_cancel(lock);
                        instant_cancel = 1;
                        lock_res_and_lock(lock);
                } else {
                        /* start the lock-timeout clock */
                        ldlm_add_waiting_lock(lock);
                }
        }
        unlock_res_and_lock(lock);

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CP_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_ast_fini(req, arg, lock, instant_cancel);

        RETURN(rc);
}
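
/*
 * Adaptive-timeout example (illustrative numbers): if a lock was granted
 * after a 5 s wait and ldlm_get_enq_timeout() currently estimates 30 s, the
 * 5 s sample is fed to at_measured() and shapes future enqueue timers; a
 * 50 s wait would instead be discarded as contaminated by a timed-out
 * earlier callback (bz18618), so the estimate is never inflated by failures.
 */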

int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_cb_set_arg          *arg = data;
        struct ldlm_request             *body;
        struct ptlrpc_request           *req;
        struct ldlm_cb_async_args       *ca;
        int                              rc;
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_GL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK);

        if (req == NULL)
                RETURN(-ENOMEM);

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        ldlm_lock2desc(lock, &body->lock_desc);

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        /* server namespace, doesn't need lock */
        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             lock->l_resource->lr_lvb_len);
        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        req->rq_interpret_reply = ldlm_cb_interpret;

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_GL_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_ast_fini(req, arg, lock, 0);

        RETURN(rc);
}

int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list)
{
        int     rc;
        ENTRY;

        rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
                               LDLM_WORK_GL_AST);
        if (rc == -ERESTART)
                ldlm_reprocess_all(res);

        RETURN(rc);
}

static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
                       struct lprocfs_stats *srv_stats)
{
        int lock_type = 0, op = 0;

        lock_type = dlm_req->lock_desc.l_resource.lr_type;

        switch (lock_type) {
        case LDLM_PLAIN:
                op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
                break;
        case LDLM_EXTENT:
                if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
                        op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
                else
                        op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
                break;
        case LDLM_FLOCK:
                op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
                break;
        case LDLM_IBITS:
                op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
                break;
        default:
                op = 0;
                break;
        }

        if (op)
                lprocfs_counter_incr(srv_stats, op);

        return;
}

/*
 * Main server-side entry point into LDLM. This is called by ptlrpc service
 * threads to carry out client lock enqueueing requests.
 */
int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
                         struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs)
{
        struct ldlm_reply *dlm_rep;
        __u32 flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        int rc = 0;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
        flags = dlm_req->lock_flags;

        LASSERT(req->rq_export);

        if (ptlrpc_req2svc(req)->srv_stats != NULL)
                ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_ENQUEUE - LDLM_FIRST_OPC);

        if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
                     dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
                          dlm_req->lock_desc.l_resource.lr_type);
                GOTO(out, rc = -EFAULT);
        }

        if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
                     dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
                     dlm_req->lock_desc.l_req_mode &
                     (dlm_req->lock_desc.l_req_mode-1))) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
                          dlm_req->lock_desc.l_req_mode);
                GOTO(out, rc = -EFAULT);
        }

        if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
                if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                             LDLM_PLAIN)) {
                        DEBUG_REQ(D_ERROR, req,
                                  "PLAIN lock request from IBITS client?");
                        GOTO(out, rc = -EPROTO);
                }
        } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                            LDLM_IBITS)) {
                DEBUG_REQ(D_ERROR, req,
                          "IBITS lock request from unaware client?");
                GOTO(out, rc = -EPROTO);
        }

#if 0
        /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
           against server's _CONNECT_SUPPORTED flags? (I don't want to use
           ibits for mgc/mgs) */

        /* INODEBITS_INTEROP: Perform conversion from plain lock to
         * inodebits lock if client does not support them. */
        if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
            (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
                dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
                dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
                        MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
                if (dlm_req->lock_desc.l_req_mode == LCK_PR)
                        dlm_req->lock_desc.l_req_mode = LCK_CR;
        }
#endif

        if (unlikely(flags & LDLM_FL_REPLAY)) {
                /* Find an existing lock in the per-export lock hash */
                lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
                                       (void *)&dlm_req->lock_handle[0]);
                if (lock != NULL) {
                        DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
                                  LPX64, lock->l_handle.h_cookie);
                        GOTO(existing_lock, rc = 0);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                cbs, NULL, 0);

        if (!lock)
                GOTO(out, rc = -ENOMEM);

        lock->l_last_activity = cfs_time_current_sec();
        lock->l_remote_handle = dlm_req->lock_handle[0];
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
        /* Don't enqueue a lock onto the export if it has been disconnected
         * due to eviction (bug 3822) or server umount (bug 24324).
         * Cancel it now instead. */
        if (req->rq_export->exp_disconnected) {
                LDLM_ERROR(lock, "lock on disconnected export %p",
                           req->rq_export);
                GOTO(out, rc = -ENOTCONN);
        }

        lock->l_export = class_export_lock_get(req->rq_export, lock);
        if (lock->l_export->exp_lock_hash)
                cfs_hash_add(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle,
                             &lock->l_exp_hash);

existing_lock:

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                /* based on the assumption that the lvb size never changes
                 * during resource lifetime; otherwise this would need
                 * resource->lr_lock's protection */
                if (lock->l_resource->lr_lvb_len) {
                        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
                                             RCL_SERVER,
                                             lock->l_resource->lr_lvb_len);
                }

                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = req_capsule_server_pack(&req->rq_pill);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                ldlm_convert_policy_to_local(req->rq_export,
                                          dlm_req->lock_desc.l_resource.lr_type,
                                          &dlm_req->lock_desc.l_policy_data,
                                          &lock->l_policy_data);
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                lock->l_req_extent = lock->l_policy_data.l_extent;

        err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
        if (err)
                GOTO(out, err);

        dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        lock_res_and_lock(lock);

        /* Now take into account flags to be inherited from the original lock
           request, both in the reply to the client and in our own lock
           flags. */
        dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
        lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;

        /* Don't move a pending lock onto the export if it has already been
         * disconnected due to eviction (bug 5683) or server umount (bug 24324).
         * Cancel it now instead. */
        if (unlikely(req->rq_export->exp_disconnected ||
                     OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
                LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                rc = -ENOTCONN;
        } else if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode) {
                        /*
                         * Only cancel the lock if it was granted, because it
                         * would be destroyed immediately and would never be
                         * granted in the future, causing timeouts on the
                         * client.  A lock that was not granted will be
                         * cancelled immediately after sending the completion
                         * AST.
                         */
                        if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                                unlock_res_and_lock(lock);
                                ldlm_lock_cancel(lock);
                                lock_res_and_lock(lock);
                        } else
                                ldlm_add_waiting_lock(lock);
                }
        }
        /* Make sure we never ever grant usual metadata locks to liblustre
           clients */
        if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
            dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
             req->rq_export->exp_libclient) {
                if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
                             !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
                        CERROR("Granting sync lock to libclient. "
                               "req fl %d, rep fl %d, lock fl "LPX64"\n",
                               dlm_req->lock_flags, dlm_rep->lock_flags,
                               lock->l_flags);
                        LDLM_ERROR(lock, "sync lock");
                        if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
                                struct ldlm_intent *it;

                                it = req_capsule_client_get(&req->rq_pill,
                                                            &RMF_LDLM_INTENT);
                                if (it != NULL) {
                                        CERROR("This is intent %s ("LPU64")\n",
                                               ldlm_it2str(it->opc), it->opc);
                                }
                        }
                }
        }

        unlock_res_and_lock(lock);
1325
1326         EXIT;
1327  out:
1328         req->rq_status = rc ?: err; /* return either error - bug 11190 */
1329         if (!req->rq_packed_final) {
1330                 err = lustre_pack_reply(req, 1, NULL, NULL);
1331                 if (rc == 0)
1332                         rc = err;
1333         }
1334
1335         /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1336          * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
1337         if (lock) {
1338                 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
1339                            "(err=%d, rc=%d)", err, rc);
1340
1341                 if (rc == 0) {
1342                         if (lock->l_resource->lr_lvb_len > 0) {
1343                                 /* The MDT path won't handle lr_lvb_data, so
1344                                  * the lock/unlock must stay contained in
1345                                  * this if block */
1346                                 void *lvb;
1347
1348                                 lvb = req_capsule_server_get(&req->rq_pill,
1349                                                              &RMF_DLM_LVB);
1350                                 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1351                                          req, lock);
1352                                 lock_res(lock->l_resource);
1353                                 memcpy(lvb, lock->l_resource->lr_lvb_data,
1354                                        lock->l_resource->lr_lvb_len);
1355                                 unlock_res(lock->l_resource);
1356                         }
1357                 } else {
1358                         lock_res_and_lock(lock);
1359                         ldlm_resource_unlink_lock(lock);
1360                         ldlm_lock_destroy_nolock(lock);
1361                         unlock_res_and_lock(lock);
1362                 }
1363
1364                 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1365                         ldlm_reprocess_all(lock->l_resource);
1366
1367                 LDLM_LOCK_RELEASE(lock);
1368         }
1369
1370         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1371                           lock, rc);
1372
1373         return rc;
1374 }
1375
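/*
 * Entry point for server-side lock enqueue RPCs: a thin wrapper that
 * bundles the caller's AST callbacks into an ldlm_callback_suite and
 * delegates to ldlm_handle_enqueue0().  A sketch of a typical server
 * caller (the ldlm_server_*_ast names are illustrative of the usual
 * wiring; the exact callbacks depend on the target):
 *
 *      rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
 *                               ldlm_server_blocking_ast,
 *                               ldlm_server_glimpse_ast);
 *
 * Returns -EFAULT if the ldlm_request cannot be unpacked.
 */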
1376 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1377                         ldlm_completion_callback completion_callback,
1378                         ldlm_blocking_callback blocking_callback,
1379                         ldlm_glimpse_callback glimpse_callback)
1380 {
1381         struct ldlm_request *dlm_req;
1382         struct ldlm_callback_suite cbs = {
1383                 .lcs_completion = completion_callback,
1384                 .lcs_blocking   = blocking_callback,
1385                 .lcs_glimpse    = glimpse_callback
1386         };
1387         int rc;
1388
1389         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1390         if (dlm_req != NULL) {
1391                 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1392                                           req, dlm_req, &cbs);
1393         } else {
1394                 rc = -EFAULT;
1395         }
1396         return rc;
1397 }
1398
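/*
 * Server-side handler for lock convert RPCs: looks the lock up by handle
 * and attempts the mode conversion via ldlm_lock_convert().  rq_status
 * is set to 0 on success, EINVAL for a stale handle, or EDEADLOCK when
 * the conversion cannot be granted.
 */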
1399 int ldlm_handle_convert0(struct ptlrpc_request *req,
1400                          const struct ldlm_request *dlm_req)
1401 {
1402         struct ldlm_reply *dlm_rep;
1403         struct ldlm_lock *lock;
1404         int rc;
1405         ENTRY;
1406
1407         if (req->rq_export && req->rq_export->exp_nid_stats &&
1408             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1409                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1410                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1411
1412         rc = req_capsule_server_pack(&req->rq_pill);
1413         if (rc)
1414                 RETURN(rc);
1415
1416         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1417         dlm_rep->lock_flags = dlm_req->lock_flags;
1418
1419         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1420         if (!lock) {
1421                 req->rq_status = EINVAL;
1422         } else {
1423                 void *res = NULL;
1424
1425                 LDLM_DEBUG(lock, "server-side convert handler START");
1426
1427                 lock->l_last_activity = cfs_time_current_sec();
1428                 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1429                                         &dlm_rep->lock_flags);
1430                 if (res) {
1431                         if (ldlm_del_waiting_lock(lock))
1432                                 LDLM_DEBUG(lock, "converted waiting lock");
1433                         req->rq_status = 0;
1434                 } else {
1435                         req->rq_status = EDEADLOCK;
1436                 }
1437         }
1438
1439         if (lock) {
1440                 if (!req->rq_status)
1441                         ldlm_reprocess_all(lock->l_resource);
1442                 LDLM_DEBUG(lock, "server-side convert handler END");
1443                 LDLM_LOCK_PUT(lock);
1444         } else
1445                 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1446
1447         RETURN(0);
1448 }
1449
1450 int ldlm_handle_convert(struct ptlrpc_request *req)
1451 {
1452         int rc;
1453         struct ldlm_request *dlm_req;
1454
1455         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1456         if (dlm_req != NULL) {
1457                 rc = ldlm_handle_convert0(req, dlm_req);
1458         } else {
1459                 CERROR("Can't unpack dlm_req\n");
1460                 rc = -EFAULT;
1461         }
1462         return rc;
1463 }
1464
1465 /* Cancel all the locks whose handles are packed into the ldlm_request */
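/*
 * "first" is the index in dlm_req->lock_handle[] to start from, so a
 * caller whose request packs other handles ahead of the cancels (e.g.
 * an enqueue with piggybacked cancels) can skip them.  Returns the
 * number of locks actually cancelled; replayed requests return 0, as
 * no locks exist on the server at replay time.
 */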
1466 int ldlm_request_cancel(struct ptlrpc_request *req,
1467                         const struct ldlm_request *dlm_req, int first)
1468 {
1469         struct ldlm_resource *res, *pres = NULL;
1470         struct ldlm_lock *lock;
1471         int i, count, done = 0;
1472         ENTRY;
1473
1474         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1475         if (first >= count)
1476                 RETURN(0);
1477
1478         /* There are no locks on the server at replay time, so skip
1479          * lock cancelling to let the replay tests pass. */
1480         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1481                 RETURN(0);
1482
1483         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1484                           "starting at %d", count, first);
1485
1486         for (i = first; i < count; i++) {
1487                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1488                 if (!lock) {
1489                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1490                                           "lock (cookie "LPU64")",
1491                                           dlm_req->lock_handle[i].cookie);
1492                         continue;
1493                 }
1494
1495                 res = lock->l_resource;
1496                 done++;
1497
1498                 if (res != pres) {
1499                         if (pres != NULL) {
1500                                 ldlm_reprocess_all(pres);
1501                                 LDLM_RESOURCE_DELREF(pres);
1502                                 ldlm_resource_putref(pres);
1503                         }
1504                         if (res != NULL) {
1505                                 ldlm_resource_getref(res);
1506                                 LDLM_RESOURCE_ADDREF(res);
1507                                 ldlm_res_lvbo_update(res, NULL, 1);
1508                         }
1509                         pres = res;
1510                 }
1511                 ldlm_lock_cancel(lock);
1512                 LDLM_LOCK_PUT(lock);
1513         }
1514         if (pres != NULL) {
1515                 ldlm_reprocess_all(pres);
1516                 LDLM_RESOURCE_DELREF(pres);
1517                 ldlm_resource_putref(pres);
1518         }
1519         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1520         RETURN(done);
1521 }
1522
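/*
 * Entry point for LDLM_CANCEL RPCs: unpacks the request, updates the
 * per-nid stats, and hands the packed handles to ldlm_request_cancel().
 * If nothing was cancelled, rq_status is set to ESTALE so the client
 * knows its handles were stale.
 */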
1523 int ldlm_handle_cancel(struct ptlrpc_request *req)
1524 {
1525         struct ldlm_request *dlm_req;
1526         int rc;
1527         ENTRY;
1528
1529         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1530         if (dlm_req == NULL) {
1531                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1532                 RETURN(-EFAULT);
1533         }
1534
1535         if (req->rq_export && req->rq_export->exp_nid_stats &&
1536             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1537                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1538                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1539
1540         rc = req_capsule_server_pack(&req->rq_pill);
1541         if (rc)
1542                 RETURN(rc);
1543
1544         if (!ldlm_request_cancel(req, dlm_req, 0))
1545                 req->rq_status = ESTALE;
1546
1547         RETURN(ptlrpc_reply(req));
1548 }
1549 #endif /* HAVE_SERVER_SUPPORT */
1550
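/*
 * Client-side handler for a blocking AST: mark the lock CBPENDING (and
 * CANCEL, for cancel-on-block locks) and run the blocking callback right
 * away if the lock is no longer referenced locally; otherwise the cancel
 * happens later, when the last reader/writer reference goes away.
 */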
1551 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1552                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1553 {
1554         int do_ast;
1555         ENTRY;
1556
1557         LDLM_DEBUG(lock, "client blocking AST callback handler");
1558
1559         lock_res_and_lock(lock);
1560         lock->l_flags |= LDLM_FL_CBPENDING;
1561
1562         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1563                 lock->l_flags |= LDLM_FL_CANCEL;
1564
1565         do_ast = (!lock->l_readers && !lock->l_writers);
1566         unlock_res_and_lock(lock);
1567
1568         if (do_ast) {
1569                 CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
1570                        lock, lock->l_blocking_ast);
1571                 if (lock->l_blocking_ast != NULL)
1572                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1573                                              LDLM_CB_BLOCKING);
1574         } else {
1575                 CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
1576                        lock);
1577         }
1578
1579         LDLM_DEBUG(lock, "client blocking callback handler END");
1580         LDLM_LOCK_RELEASE(lock);
1581         EXIT;
1582 }
1583
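/*
 * Client-side handler for a completion AST: the server has granted the
 * lock, possibly with a different mode, policy data, or resource than
 * originally requested, so update the local lock from dlm_req, copy in
 * any LVB, grant the lock locally, and run the resulting AST work.
 */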
1584 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1585                                     struct ldlm_namespace *ns,
1586                                     struct ldlm_request *dlm_req,
1587                                     struct ldlm_lock *lock)
1588 {
1589         int lvb_len;
1590         CFS_LIST_HEAD(ast_list);
1591         ENTRY;
1592
1593         LDLM_DEBUG(lock, "client completion callback handler START");
1594
1595         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1596                 int to = cfs_time_seconds(1);
1597                 while (to > 0) {
1598                         cfs_schedule_timeout_and_set_state(
1599                                 CFS_TASK_INTERRUPTIBLE, to);
1600                         if (lock->l_granted_mode == lock->l_req_mode ||
1601                             lock->l_destroyed)
1602                                 break;
1603                 }
1604         }
1605
1606         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1607         if (lvb_len > 0) {
1608                 if (lock->l_lvb_len > 0) {
1609                         /* for extent lock, lvb contains ost_lvb{}. */
1610                         LASSERT(lock->l_lvb_data != NULL);
1611                         LASSERTF(lock->l_lvb_len == lvb_len,
1612                                 "preallocated %d, actual %d.\n",
1613                                 lock->l_lvb_len, lvb_len);
1614                 } else { /* for layout lock, lvb has variable length */
1615                         void *lvb_data;
1616
1617                         OBD_ALLOC(lvb_data, lvb_len);
1618                         if (lvb_data == NULL)
1619                                 LDLM_ERROR(lock, "no memory.\n");
1620
1621                         lock_res_and_lock(lock);
1622                         if (lvb_data == NULL) {
1623                                 lock->l_flags |= LDLM_FL_FAILED;
1624                         } else {
1625                                 LASSERT(lock->l_lvb_data == NULL);
1626                                 lock->l_lvb_data = lvb_data;
1627                                 lock->l_lvb_len = lvb_len;
1628                         }
1629                         unlock_res_and_lock(lock);
1630                 }
1631         }
1632
1633         lock_res_and_lock(lock);
1634         if (lock->l_destroyed ||
1635             lock->l_granted_mode == lock->l_req_mode) {
1636                 /* bug 11300: the lock has already been granted */
1637                 unlock_res_and_lock(lock);
1638                 LDLM_DEBUG(lock, "Double grant race happened");
1639                 LDLM_LOCK_RELEASE(lock);
1640                 EXIT;
1641                 return;
1642         }
1643
1644         /* If we receive the completion AST before the actual enqueue returned,
1645          * then we might need to switch lock modes, resources, or extents. */
1646         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1647                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1648                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1649         }
1650
1651         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1652                 ldlm_convert_policy_to_local(req->rq_export,
1653                                           dlm_req->lock_desc.l_resource.lr_type,
1654                                           &dlm_req->lock_desc.l_policy_data,
1655                                           &lock->l_policy_data);
1656                 LDLM_DEBUG(lock, "completion AST, new policy data");
1657         }
1658
1659         ldlm_resource_unlink_lock(lock);
1660         if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1661                    &lock->l_resource->lr_name,
1662                    sizeof(lock->l_resource->lr_name)) != 0) {
1663                 unlock_res_and_lock(lock);
1664                 if (ldlm_lock_change_resource(ns, lock,
1665                                 &dlm_req->lock_desc.l_resource.lr_name) != 0) {
1666                         LDLM_ERROR(lock, "Failed to allocate resource");
1667                         LDLM_LOCK_RELEASE(lock);
1668                         EXIT;
1669                         return;
1670                 }
1671                 LDLM_DEBUG(lock, "completion AST, new resource");
1672                 CERROR("change resource!\n");
1673                 lock_res_and_lock(lock);
1674         }
1675
1676         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1677                 /* BL_AST locks are not needed in the LRU;
1678                  * keep them out so ldlm_cancel_lru() stays fast. */
1679                 ldlm_lock_remove_from_lru(lock);
1680                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1681                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1682         }
1683
1684         if (lock->l_lvb_len) {
1685                 if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
1686                                          RCL_CLIENT) < lock->l_lvb_len) {
1687                         LDLM_ERROR(lock, "completion AST did not contain "
1688                                    "expected LVB!");
1689                 } else {
1690                         void *lvb = req_capsule_client_get(&req->rq_pill,
1691                                                            &RMF_DLM_LVB);
1692                         memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1693                 }
1694         }
1695
1696         ldlm_grant_lock(lock, &ast_list);
1697         unlock_res_and_lock(lock);
1698
1699         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1700
1701         /* Let the enqueue path call osc_lock_upcall() and initialize
1702          * l_ast_data */
1703         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1704
1705         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1706
1707         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1708                           lock);
1709         LDLM_LOCK_RELEASE(lock);
1710         EXIT;
1711 }
1712
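/*
 * Client-side handler for a glimpse AST: let the lock's l_glimpse_ast
 * fill in the reply (typically LVB data), then opportunistically cancel
 * the lock if it is an unused PW lock that has been idle for more than
 * ten seconds.
 */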
1713 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1714                                     struct ldlm_namespace *ns,
1715                                     struct ldlm_request *dlm_req,
1716                                     struct ldlm_lock *lock)
1717 {
1718         int rc = -ENOSYS;
1719         ENTRY;
1720
1721         LDLM_DEBUG(lock, "client glimpse AST callback handler");
1722
1723         if (lock->l_glimpse_ast != NULL)
1724                 rc = lock->l_glimpse_ast(lock, req);
1725
1726         if (req->rq_repmsg != NULL) {
1727                 ptlrpc_reply(req);
1728         } else {
1729                 req->rq_status = rc;
1730                 ptlrpc_error(req);
1731         }
1732
1733         lock_res_and_lock(lock);
1734         if (lock->l_granted_mode == LCK_PW &&
1735             !lock->l_readers && !lock->l_writers &&
1736             cfs_time_after(cfs_time_current(),
1737                            cfs_time_add(lock->l_last_used,
1738                                         cfs_time_seconds(10)))) {
1739                 unlock_res_and_lock(lock);
1740                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1741                         ldlm_handle_bl_callback(ns, NULL, lock);
1742
1743                 EXIT;
1744                 return;
1745         }
1746         unlock_res_and_lock(lock);
1747         LDLM_LOCK_RELEASE(lock);
1748         EXIT;
1749 }
1750
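/*
 * Send a reply with the given status, packing the reply buffer first if
 * the caller has not already done so.  A no-op when rq_no_reply is set.
 */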
1751 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1752 {
1753         if (req->rq_no_reply)
1754                 return 0;
1755
1756         req->rq_status = rc;
1757         if (!req->rq_packed_final) {
1758                 rc = lustre_pack_reply(req, 1, NULL, NULL);
1759                 if (rc)
1760                         return rc;
1761         }
1762         return ptlrpc_reply(req);
1763 }
1764
1765 #ifdef __KERNEL__
1766 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
1767 {
1768         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1769         ENTRY;
1770
1771         cfs_spin_lock(&blp->blp_lock);
1772         if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
1773                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
1774                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
1775         } else {
1776                 /* other blocking callbacks are added to the regular list */
1777                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1778         }
1779         cfs_spin_unlock(&blp->blp_lock);
1780
1781         cfs_waitq_signal(&blp->blp_waitq);
1782
1783         /* cannot use blwi->blwi_mode here, as blwi may already have been
1784            freed in LDLM_ASYNC mode */
1785         if (mode == LDLM_SYNC)
1786                 cfs_wait_for_completion(&blwi->blwi_comp);
1787
1788         RETURN(0);
1789 }
1790
1791 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
1792                              struct ldlm_namespace *ns,
1793                              struct ldlm_lock_desc *ld,
1794                              cfs_list_t *cancels, int count,
1795                              struct ldlm_lock *lock,
1796                              int mode)
1797 {
1798         cfs_init_completion(&blwi->blwi_comp);
1799         CFS_INIT_LIST_HEAD(&blwi->blwi_head);
1800
1801         if (cfs_memory_pressure_get())
1802                 blwi->blwi_mem_pressure = 1;
1803
1804         blwi->blwi_ns = ns;
1805         blwi->blwi_mode = mode;
1806         if (ld != NULL)
1807                 blwi->blwi_ld = *ld;
1808         if (count) {
1809                 cfs_list_add(&blwi->blwi_head, cancels);
1810                 cfs_list_del_init(cancels);
1811                 blwi->blwi_count = count;
1812         } else {
1813                 blwi->blwi_lock = lock;
1814         }
1815 }
1816
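/*
 * Queue blocking-callback work for the ldlm_bl_NN threads.  In LDLM_SYNC
 * mode the work item lives on the caller's stack and we block until it
 * completes; in LDLM_ASYNC mode it is allocated here and freed by the
 * thread that consumes it.
 */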
1817 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1818                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1819                              cfs_list_t *cancels, int count, int mode)
1820 {
1821         ENTRY;
1822
1823         if (cancels && count == 0)
1824                 RETURN(0);
1825
1826         if (mode == LDLM_SYNC) {
1827                 /* for a synchronous call, do only minimal memory allocation,
1828                  * as this path can be triggered from the kernel shrinker
1829                  */
1830                 struct ldlm_bl_work_item blwi;
1831                 memset(&blwi, 0, sizeof(blwi));
1832                 init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
1833                 RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
1834         } else {
1835                 struct ldlm_bl_work_item *blwi;
1836                 OBD_ALLOC(blwi, sizeof(*blwi));
1837                 if (blwi == NULL)
1838                         RETURN(-ENOMEM);
1839                 init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
1840
1841                 RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
1842         }
1843 }
1844
1845 #endif
1846
1847 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1848                            struct ldlm_lock *lock)
1849 {
1850 #ifdef __KERNEL__
1851         RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
1852 #else
1853         RETURN(-ENOSYS);
1854 #endif
1855 }
1856
1857 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1858                            cfs_list_t *cancels, int count, int mode)
1859 {
1860 #ifdef __KERNEL__
1861         RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
1862 #else
1863         RETURN(-ENOSYS);
1864 #endif
1865 }
1866
1867 /* Set_info call coming from a server (e.g. MDT) to a client (e.g. MDC). */
1868 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
1869 {
1870         struct obd_device *obd = req->rq_export->exp_obd;
1871         char *key;
1872         void *val;
1873         int keylen, vallen;
1874         int rc = -ENOSYS;
1875         ENTRY;
1876
1877         DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
1878
1879         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
1880
1881         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1882         if (key == NULL) {
1883                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
1884                 RETURN(-EFAULT);
1885         }
1886         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
1887                                       RCL_CLIENT);
1888         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1889         if (val == NULL) {
1890                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
1891                 RETURN(-EFAULT);
1892         }
1893         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
1894                                       RCL_CLIENT);
1895
1896         /* We are responsible for swabbing contents of val */
1897
1898         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
1899                 /* Pass it on to mdc (the "export" in this case) */
1900                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
1901                                         req->rq_export,
1902                                         sizeof(KEY_HSM_COPYTOOL_SEND),
1903                                         KEY_HSM_COPYTOOL_SEND,
1904                                         vallen, val, NULL);
1905         else
1906                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
1907
1908         return rc;
1909 }
1910
1911 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
1912                                         const char *msg, int rc,
1913                                         struct lustre_handle *handle)
1914 {
1915         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
1916                   "%s: [nid %s] [rc %d] [lock "LPX64"]",
1917                   msg, libcfs_id2str(req->rq_peer), rc,
1918                   handle ? handle->cookie : 0);
1919         if (req->rq_no_reply)
1920                 CWARN("No reply was sent, possibly due to bug 21636.\n");
1921         else if (rc)
1922                 CWARN("Sending the reply failed, possibly due to bug 21636.\n");
1923 }
1924
1925 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
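/*
 * Request handler for the ldlm_cbd service: dispatches blocking,
 * completion, and glimpse ASTs to the handlers above, and also services
 * the assorted llog/quota/set_info opcodes that share this portal for
 * historical reasons.
 */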
1926 static int ldlm_callback_handler(struct ptlrpc_request *req)
1927 {
1928         struct ldlm_namespace *ns;
1929         struct ldlm_request *dlm_req;
1930         struct ldlm_lock *lock;
1931         int rc;
1932         ENTRY;
1933
1934         /* Requests arrive in sender's byte order.  The ptlrpc service
1935          * handler has already checked and, if necessary, byte-swapped the
1936          * incoming request message body, but I am responsible for the
1937          * message buffers. */
1938
1939         /* do nothing for sec context finalize */
1940         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
1941                 RETURN(0);
1942
1943         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
1944
1945         if (req->rq_export == NULL) {
1946                 rc = ldlm_callback_reply(req, -ENOTCONN);
1947                 ldlm_callback_errmsg(req, "Operate on unconnected server",
1948                                      rc, NULL);
1949                 RETURN(0);
1950         }
1951
1952         LASSERT(req->rq_export != NULL);
1953         LASSERT(req->rq_export->exp_obd != NULL);
1954
1955         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1956         case LDLM_BL_CALLBACK:
1957                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
1958                         RETURN(0);
1959                 break;
1960         case LDLM_CP_CALLBACK:
1961                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
1962                         RETURN(0);
1963                 break;
1964         case LDLM_GL_CALLBACK:
1965                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
1966                         RETURN(0);
1967                 break;
1968         case LDLM_SET_INFO:
1969                 rc = ldlm_handle_setinfo(req);
1970                 ldlm_callback_reply(req, rc);
1971                 RETURN(0);
1972         case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1973                 CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
1974                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
1975                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
1976                         RETURN(0);
1977                 rc = llog_origin_handle_cancel(req);
1978                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
1979                         RETURN(0);
1980                 ldlm_callback_reply(req, rc);
1981                 RETURN(0);
1982         case OBD_QC_CALLBACK:
1983                 req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
1984                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
1985                         RETURN(0);
1986                 rc = target_handle_qc_callback(req);
1987                 ldlm_callback_reply(req, rc);
1988                 RETURN(0);
1989         case QUOTA_DQACQ:
1990         case QUOTA_DQREL:
1991                 /* reply in handler */
1992                 req_capsule_set(&req->rq_pill, &RQF_MDS_QUOTA_DQACQ);
1993                 rc = target_handle_dqacq_callback(req);
1994                 RETURN(0);
1995         case LLOG_ORIGIN_HANDLE_CREATE:
1996                 req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1997                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
1998                         RETURN(0);
1999                 rc = llog_origin_handle_open(req);
2000                 ldlm_callback_reply(req, rc);
2001                 RETURN(0);
2002         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2003                 req_capsule_set(&req->rq_pill,
2004                                 &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2005                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2006                         RETURN(0);
2007                 rc = llog_origin_handle_next_block(req);
2008                 ldlm_callback_reply(req, rc);
2009                 RETURN(0);
2010         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2011                 req_capsule_set(&req->rq_pill,
2012                                 &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2013                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2014                         RETURN(0);
2015                 rc = llog_origin_handle_read_header(req);
2016                 ldlm_callback_reply(req, rc);
2017                 RETURN(0);
2018         case LLOG_ORIGIN_HANDLE_CLOSE:
2019                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2020                         RETURN(0);
2021                 rc = llog_origin_handle_close(req);
2022                 ldlm_callback_reply(req, rc);
2023                 RETURN(0);
2024         default:
2025                 CERROR("unknown opcode %u\n",
2026                        lustre_msg_get_opc(req->rq_reqmsg));
2027                 ldlm_callback_reply(req, -EPROTO);
2028                 RETURN(0);
2029         }
2030
2031         ns = req->rq_export->exp_obd->obd_namespace;
2032         LASSERT(ns != NULL);
2033
2034         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2035
2036         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2037         if (dlm_req == NULL) {
2038                 rc = ldlm_callback_reply(req, -EPROTO);
2039                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2040                                      NULL);
2041                 RETURN(0);
2042         }
2043
2044         /* Force a known safe race, send a cancel to the server for a lock
2045          * which the server has already started a blocking callback on. */
2046         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2047             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2048                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
2049                 if (rc < 0)
2050                         CERROR("ldlm_cli_cancel: %d\n", rc);
2051         }
2052
2053         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2054         if (!lock) {
2055                 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
2056                        "disappeared\n", dlm_req->lock_handle[0].cookie);
2057                 rc = ldlm_callback_reply(req, -EINVAL);
2058                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2059                                      &dlm_req->lock_handle[0]);
2060                 RETURN(0);
2061         }
2062
2063         if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
2064             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2065                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2066
2067         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2068         lock_res_and_lock(lock);
2069         lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
2070         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2071                 /* If somebody cancels the lock and the cache is already
2072                  * dropped, or the lock failed before the cp_ast reached
2073                  * the client, we can tell the server we have no lock.
2074                  * Otherwise, send the cancel after dropping the cache. */
2075                 if (((lock->l_flags & LDLM_FL_CANCELING) &&
2076                     (lock->l_flags & LDLM_FL_BL_DONE)) ||
2077                     (lock->l_flags & LDLM_FL_FAILED)) {
2078                         LDLM_DEBUG(lock, "callback on lock "
2079                                    LPX64" - lock disappeared\n",
2080                                    dlm_req->lock_handle[0].cookie);
2081                         unlock_res_and_lock(lock);
2082                         LDLM_LOCK_RELEASE(lock);
2083                         rc = ldlm_callback_reply(req, -EINVAL);
2084                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2085                                              &dlm_req->lock_handle[0]);
2086                         RETURN(0);
2087                 }
2088                 /* BL_AST locks are not needed in the LRU;
2089                  * keep them out so ldlm_cancel_lru() stays fast. */
2090                 ldlm_lock_remove_from_lru(lock);
2091                 lock->l_flags |= LDLM_FL_BL_AST;
2092         }
2093         unlock_res_and_lock(lock);
2094
2095         /* We want the ost thread to get this reply so that it can respond
2096          * to ost requests (write cache writeback) that might be triggered
2097          * in the callback.
2098          *
2099          * But we'd also like to be able to indicate in the reply that we're
2100          * cancelling right now, because it's unused, or have an intent result
2101          * in the reply, so we might have to push the responsibility for sending
2102          * the reply down into the AST handlers, alas. */
2103
2104         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2105         case LDLM_BL_CALLBACK:
2106                 CDEBUG(D_INODE, "blocking ast\n");
2107                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2108                 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
2109                         rc = ldlm_callback_reply(req, 0);
2110                         if (req->rq_no_reply || rc)
2111                                 ldlm_callback_errmsg(req, "Normal process", rc,
2112                                                      &dlm_req->lock_handle[0]);
2113                 }
2114                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2115                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2116                 break;
2117         case LDLM_CP_CALLBACK:
2118                 CDEBUG(D_INODE, "completion ast\n");
2119                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2120                 ldlm_callback_reply(req, 0);
2121                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2122                 break;
2123         case LDLM_GL_CALLBACK:
2124                 CDEBUG(D_INODE, "glimpse ast\n");
2125                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2126                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2127                 break;
2128         default:
2129                 LBUG();                         /* checked above */
2130         }
2131
2132         RETURN(0);
2133 }
2134
2135 #ifdef HAVE_SERVER_SUPPORT
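/*
 * Request handler for the ldlm_canceld service: processes LDLM_CANCEL
 * (and legacy OBD_LOG_CANCEL) requests arriving on the cancel portal.
 */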
2136 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2137 {
2138         int rc;
2139         ENTRY;
2140
2141         /* Requests arrive in sender's byte order.  The ptlrpc service
2142          * handler has already checked and, if necessary, byte-swapped the
2143          * incoming request message body, but I am responsible for the
2144          * message buffers. */
2145
2146         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2147
2148         if (req->rq_export == NULL) {
2149                 struct ldlm_request *dlm_req;
2150
2151                 CERROR("%s from %s arrived at %lu with bad export cookie "
2152                        LPU64"\n",
2153                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2154                        libcfs_nid2str(req->rq_peer.nid),
2155                        req->rq_arrival_time.tv_sec,
2156                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2157
2158                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2159                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2160                         dlm_req = req_capsule_client_get(&req->rq_pill,
2161                                                          &RMF_DLM_REQ);
2162                         if (dlm_req != NULL)
2163                                 ldlm_lock_dump_handle(D_ERROR,
2164                                                       &dlm_req->lock_handle[0]);
2165                 }
2166                 ldlm_callback_reply(req, -ENOTCONN);
2167                 RETURN(0);
2168         }
2169
2170         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2171
2172         /* XXX FIXME move this back to mds/handler.c, bug 249 */
2173         case LDLM_CANCEL:
2174                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2175                 CDEBUG(D_INODE, "cancel\n");
2176                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
2177                         RETURN(0);
2178                 rc = ldlm_handle_cancel(req);
2179                 if (rc)
2180                         break;
2181                 RETURN(0);
2182         case OBD_LOG_CANCEL:
2183                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
2184                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
2185                         RETURN(0);
2186                 rc = llog_origin_handle_cancel(req);
2187                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
2188                         RETURN(0);
2189                 ldlm_callback_reply(req, rc);
2190                 RETURN(0);
2191         default:
2192                 CERROR("invalid opcode %u\n",
2193                        lustre_msg_get_opc(req->rq_reqmsg));
2194                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2195                 ldlm_callback_reply(req, -EINVAL);
2196         }
2197
2198         RETURN(0);
2199 }
2200
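/*
 * ptlrpc_hpreq_ops method: returns 1 if this cancel RPC packs the handle
 * of the given lock, in which case the request's priority is raised.
 */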
2201 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2202                                         struct ldlm_lock *lock)
2203 {
2204         struct ldlm_request *dlm_req;
2205         struct lustre_handle lockh;
2206         int rc = 0;
2207         int i;
2208         ENTRY;
2209
2210         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2211         if (dlm_req == NULL)
2212                 RETURN(0);
2213
2214         ldlm_lock2handle(lock, &lockh);
2215         for (i = 0; i < dlm_req->lock_count; i++) {
2216                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2217                                         &lockh)) {
2218                         DEBUG_REQ(D_RPCTRACE, req,
2219                                   "Prio raised by lock "LPX64".", lockh.cookie);
2220
2221                         rc = 1;
2222                         break;
2223                 }
2224         }
2225
2226         RETURN(rc);
2228 }
2229
2230 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2231 {
2232         struct ldlm_request *dlm_req;
2233         int rc = 0;
2234         int i;
2235         ENTRY;
2236
2237         /* no prolong in recovery */
2238         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2239                 RETURN(0);
2240
2241         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2242         if (dlm_req == NULL)
2243                 RETURN(-EFAULT);
2244
2245         for (i = 0; i < dlm_req->lock_count; i++) {
2246                 struct ldlm_lock *lock;
2247
2248                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2249                 if (lock == NULL)
2250                         continue;
2251
2252                 rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
2253                 if (rc)
2254                         LDLM_DEBUG(lock, "hpreq cancel lock");
2255                 LDLM_LOCK_PUT(lock);
2256
2257                 if (rc)
2258                         break;
2259         }
2260
2261         RETURN(rc);
2262 }
2263
2264 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2265         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2266         .hpreq_check      = ldlm_cancel_hpreq_check,
2267         .hpreq_fini       = NULL,
2268 };
2269
2270 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2271 {
2272         ENTRY;
2273
2274         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2275
2276         if (req->rq_export == NULL)
2277                 RETURN(0);
2278
2279         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2280                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2281                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2282         }
2283         RETURN(0);
2284 }
2285
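/*
 * cfs_hash iterator used by ldlm_revoke_export_locks(): for each granted
 * PLAIN or IBITS lock on the export that has not yet had a blocking AST
 * sent, set LDLM_FL_AST_SENT, unhash it from the export, and collect it
 * on rpc_list for a revoke AST.
 */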
2286 int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
2287                         cfs_hlist_node_t *hnode, void *data)
2289 {
2290         cfs_list_t         *rpc_list = data;
2291         struct ldlm_lock   *lock = cfs_hash_object(hs, hnode);
2292
2293         lock_res_and_lock(lock);
2294
2295         if (lock->l_req_mode != lock->l_granted_mode) {
2296                 unlock_res_and_lock(lock);
2297                 return 0;
2298         }
2299
2300         LASSERT(lock->l_resource);
2301         if (lock->l_resource->lr_type != LDLM_IBITS &&
2302             lock->l_resource->lr_type != LDLM_PLAIN) {
2303                 unlock_res_and_lock(lock);
2304                 return 0;
2305         }
2306
2307         if (lock->l_flags & LDLM_FL_AST_SENT) {
2308                 unlock_res_and_lock(lock);
2309                 return 0;
2310         }
2311
2312         LASSERT(lock->l_blocking_ast);
2313         LASSERT(!lock->l_blocking_lock);
2314
2315         lock->l_flags |= LDLM_FL_AST_SENT;
2316         if (lock->l_export && lock->l_export->exp_lock_hash) {
2317                 /* NB: it's safe to call cfs_hash_del() even if the lock
2318                  * isn't in exp_lock_hash. */
2319                 cfs_hash_del(lock->l_export->exp_lock_hash,
2320                              &lock->l_remote_handle, &lock->l_exp_hash);
2321         }
2322
2323         cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
2324         LDLM_LOCK_GET(lock);
2325
2326         unlock_res_and_lock(lock);
2327         return 0;
2328 }
2329
2330 void ldlm_revoke_export_locks(struct obd_export *exp)
2331 {
2332         cfs_list_t  rpc_list;
2333         ENTRY;
2334
2335         CFS_INIT_LIST_HEAD(&rpc_list);
2336         cfs_hash_for_each_empty(exp->exp_lock_hash,
2337                                 ldlm_revoke_lock_cb, &rpc_list);
2338         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2339                           LDLM_WORK_REVOKE_AST);
2340
2341         EXIT;
2342 }
2343 #endif /* HAVE_SERVER_SUPPORT */
2344
2345 #ifdef __KERNEL__
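/*
 * Pick the next blocking-callback work item.  Priority work
 * (LDLM_FL_DISCARD_DATA requests) is preferred, but the regular list is
 * still serviced at least once every blp_num_threads items so it cannot
 * be starved.
 */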
2346 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
2347 {
2348         struct ldlm_bl_work_item *blwi = NULL;
2349         static unsigned int num_bl = 0;
2350
2351         cfs_spin_lock(&blp->blp_lock);
2352         /* process blp_list at least once per blp_num_threads items so the priority list can't starve it */
2353         if (!cfs_list_empty(&blp->blp_list) &&
2354             (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
2355                 blwi = cfs_list_entry(blp->blp_list.next,
2356                                       struct ldlm_bl_work_item, blwi_entry);
2357         else
2358                 if (!cfs_list_empty(&blp->blp_prio_list))
2359                         blwi = cfs_list_entry(blp->blp_prio_list.next,
2360                                               struct ldlm_bl_work_item,
2361                                               blwi_entry);
2362
2363         if (blwi) {
2364                 if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
2365                         num_bl = 0;
2366                 cfs_list_del(&blwi->blwi_entry);
2367         }
2368         cfs_spin_unlock(&blp->blp_lock);
2369
2370         return blwi;
2371 }
2372
2373 /* This only contains temporary data until the thread starts */
2374 struct ldlm_bl_thread_data {
2375         char                    bltd_name[CFS_CURPROC_COMM_MAX];
2376         struct ldlm_bl_pool     *bltd_blp;
2377         cfs_completion_t        bltd_comp;
2378         int                     bltd_num;
2379 };
2380
2381 static int ldlm_bl_thread_main(void *arg);
2382
2383 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
2384 {
2385         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2386         int rc;
2387
2388         cfs_init_completion(&bltd.bltd_comp);
2389         rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
2390         if (rc < 0) {
2391                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
2392                        cfs_atomic_read(&blp->blp_num_threads), rc);
2393                 return rc;
2394         }
2395         cfs_wait_for_completion(&bltd.bltd_comp);
2396
2397         return 0;
2398 }
2399
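/*
 * Main loop of an ldlm_bl_NN thread: pull work items from the pool,
 * starting an extra thread when all the current ones are busy, and
 * either cancel the packed list of locks or run the single blocking
 * callback carried by the item.
 */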
2400 static int ldlm_bl_thread_main(void *arg)
2401 {
2402         struct ldlm_bl_pool *blp;
2403         ENTRY;
2404
2405         {
2406                 struct ldlm_bl_thread_data *bltd = arg;
2407
2408                 blp = bltd->bltd_blp;
2409
2410                 bltd->bltd_num =
2411                         cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
2412                 cfs_atomic_inc(&blp->blp_busy_threads);
2413
2414                 snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
2415                         "ldlm_bl_%02d", bltd->bltd_num);
2416                 cfs_daemonize(bltd->bltd_name);
2417
2418                 cfs_complete(&bltd->bltd_comp);
2419                 /* cannot use bltd after this, it is only on caller's stack */
2420         }
2421
2422         while (1) {
2423                 struct l_wait_info lwi = { 0 };
2424                 struct ldlm_bl_work_item *blwi = NULL;
2425                 int busy;
2426
2427                 blwi = ldlm_bl_get_work(blp);
2428
2429                 if (blwi == NULL) {
2430                         cfs_atomic_dec(&blp->blp_busy_threads);
2431                         l_wait_event_exclusive(blp->blp_waitq,
2432                                          (blwi = ldlm_bl_get_work(blp)) != NULL,
2433                                          &lwi);
2434                         busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
2435                 } else {
2436                         busy = cfs_atomic_read(&blp->blp_busy_threads);
2437                 }
2438
2439                 if (blwi->blwi_ns == NULL)
2440                         /* added by ldlm_cleanup() */
2441                         break;
2442
2443                 /* Not fatal if racy and we end up with a few too many threads */
2444                 if (unlikely(busy < blp->blp_max_threads &&
2445                              busy >= cfs_atomic_read(&blp->blp_num_threads) &&
2446                              !blwi->blwi_mem_pressure))
2447                         /* discard the return value, we tried */
2448                         ldlm_bl_thread_start(blp);
2449
2450                 if (blwi->blwi_mem_pressure)
2451                         cfs_memory_pressure_set();
2452
2453                 if (blwi->blwi_count) {
2454                         int count;
2455                         /* This is the special case where we cancel LRU
2456                          * locks asynchronously: the list of locks is passed
2457                          * in here.  The locks are marked LDLM_FL_CANCELING,
2458                          * but NOT cancelled locally yet. */
2459                         count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2460                                                            blwi->blwi_count,
2461                                                            LCF_BL_AST);
2462                         ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
2463                 } else {
2464                         ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2465                                                 blwi->blwi_lock);
2466                 }
2467                 if (blwi->blwi_mem_pressure)
2468                         cfs_memory_pressure_clr();
2469
2470                 if (blwi->blwi_mode == LDLM_ASYNC)
2471                         OBD_FREE(blwi, sizeof(*blwi));
2472                 else
2473                         cfs_complete(&blwi->blwi_comp);
2474         }
2475
2476         cfs_atomic_dec(&blp->blp_busy_threads);
2477         cfs_atomic_dec(&blp->blp_num_threads);
2478         cfs_complete(&blp->blp_comp);
2479         RETURN(0);
2480 }
2481
2482 #endif
2483
2484 static int ldlm_setup(void);
2485 static int ldlm_cleanup(void);
2486
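/*
 * Reference counting for the shared LDLM state: the first ldlm_get_ref()
 * runs ldlm_setup() and the last ldlm_put_ref() runs ldlm_cleanup().
 * Typical usage by an obd that needs the DLM (sketch):
 *
 *      rc = ldlm_get_ref();
 *      if (rc != 0)
 *              return rc;
 *      ...
 *      ldlm_put_ref();
 */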
2487 int ldlm_get_ref(void)
2488 {
2489         int rc = 0;
2490         ENTRY;
2491         cfs_mutex_lock(&ldlm_ref_mutex);
2492         if (++ldlm_refcount == 1) {
2493                 rc = ldlm_setup();
2494                 if (rc)
2495                         ldlm_refcount--;
2496         }
2497         cfs_mutex_unlock(&ldlm_ref_mutex);
2498
2499         RETURN(rc);
2500 }
2501
2502 void ldlm_put_ref(void)
2503 {
2504         ENTRY;
2505         cfs_mutex_lock(&ldlm_ref_mutex);
2506         if (ldlm_refcount == 1) {
2507                 int rc = ldlm_cleanup();
2508                 if (rc)
2509                         CERROR("ldlm_cleanup failed: %d\n", rc);
2510                 else
2511                         ldlm_refcount--;
2512         } else {
2513                 ldlm_refcount--;
2514         }
2515         cfs_mutex_unlock(&ldlm_ref_mutex);
2516
2517         EXIT;
2518 }
2519
2520 /*
2521  * Export handle<->lock hash operations.
2522  */
2523 static unsigned
2524 ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
2525 {
2526         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2527 }
2528
2529 static void *
2530 ldlm_export_lock_key(cfs_hlist_node_t *hnode)
2531 {
2532         struct ldlm_lock *lock;
2533
2534         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2535         return &lock->l_remote_handle;
2536 }
2537
2538 static void
2539 ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
2540 {
2541         struct ldlm_lock     *lock;
2542
2543         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2544         lock->l_remote_handle = *(struct lustre_handle *)key;
2545 }
2546
2547 static int
2548 ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
2549 {
2550         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2551 }
2552
2553 static void *
2554 ldlm_export_lock_object(cfs_hlist_node_t *hnode)
2555 {
2556         return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2557 }
2558
2559 static void
2560 ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2561 {
2562         struct ldlm_lock *lock;
2563
2564         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2565         LDLM_LOCK_GET(lock);
2566 }
2567
2568 static void
2569 ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2570 {
2571         struct ldlm_lock *lock;
2572
2573         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2574         LDLM_LOCK_RELEASE(lock);
2575 }
2576
2577 static cfs_hash_ops_t ldlm_export_lock_ops = {
2578         .hs_hash        = ldlm_export_lock_hash,
2579         .hs_key         = ldlm_export_lock_key,
2580         .hs_keycmp      = ldlm_export_lock_keycmp,
2581         .hs_keycpy      = ldlm_export_lock_keycpy,
2582         .hs_object      = ldlm_export_lock_object,
2583         .hs_get         = ldlm_export_lock_get,
2584         .hs_put         = ldlm_export_lock_put,
2585         .hs_put_locked  = ldlm_export_lock_put,
2586 };
2587
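/*
 * Create the per-export hash that maps remote lock handles to the
 * corresponding locks for this export, using the ops table above.
 */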
2588 int ldlm_init_export(struct obd_export *exp)
2589 {
2590         ENTRY;
2591
2592         exp->exp_lock_hash =
2593                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
2594                                 HASH_EXP_LOCK_CUR_BITS,
2595                                 HASH_EXP_LOCK_MAX_BITS,
2596                                 HASH_EXP_LOCK_BKT_BITS, 0,
2597                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
2598                                 &ldlm_export_lock_ops,
2599                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
2600                                 CFS_HASH_NBLK_CHANGE);
2601
2602         if (!exp->exp_lock_hash)
2603                 RETURN(-ENOMEM);
2604
2605         RETURN(0);
2606 }
2607 EXPORT_SYMBOL(ldlm_init_export);
2608
2609 void ldlm_destroy_export(struct obd_export *exp)
2610 {
2611         ENTRY;
2612         cfs_hash_putref(exp->exp_lock_hash);
2613         exp->exp_lock_hash = NULL;
2614
2615         ldlm_destroy_flock_export(exp);
2616         EXIT;
2617 }
2618 EXPORT_SYMBOL(ldlm_destroy_export);
2619
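/*
 * Bring up the LDLM: register the ldlm_cbd (and, with server support,
 * ldlm_canceld) ptlrpc services, create the blocking-callback thread
 * pool, and, on servers, start the waiting-locks machinery and the
 * expired-lock thread.
 */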
2620 static int ldlm_setup(void)
2621 {
2622         static struct ptlrpc_service_conf       conf;
2623         struct ldlm_bl_pool                     *blp = NULL;
2624         int rc = 0;
2625 #ifdef __KERNEL__
2626         int i;
2627 #endif
2628         ENTRY;
2629
2630         if (ldlm_state != NULL)
2631                 RETURN(-EALREADY);
2632
2633         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
2634         if (ldlm_state == NULL)
2635                 RETURN(-ENOMEM);
2636
2637 #ifdef LPROCFS
2638         rc = ldlm_proc_setup();
2639         if (rc != 0)
2640                 GOTO(out, rc);
2641 #endif
2642
2643         memset(&conf, 0, sizeof(conf));
2644         conf = (typeof(conf)) {
2645                 .psc_name               = "ldlm_cbd",
2646                 .psc_watchdog_factor    = 2,
2647                 .psc_buf                = {
2648                         .bc_nbufs               = LDLM_NBUFS,
2649                         .bc_buf_size            = LDLM_BUFSIZE,
2650                         .bc_req_max_size        = LDLM_MAXREQSIZE,
2651                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
2652                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
2653                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
2654                 },
2655                 .psc_thr                = {
2656                         .tc_thr_name            = "ldlm_cb",
2657                         .tc_thr_factor          = LDLM_THR_FACTOR,
2658                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
2659                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
2660                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
2661                         .tc_nthrs_user          = ldlm_num_threads,
2662                         .tc_cpu_affinity        = 1,
2663                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
2664                 },
2665                 .psc_cpt                = {
2666                         .cc_pattern             = ldlm_cpts,
2667                 },
2668                 .psc_ops                = {
2669                         .so_req_handler         = ldlm_callback_handler,
2670                 },
2671         };
2672         ldlm_state->ldlm_cb_service =
2673                         ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
2674         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
2675                 CERROR("failed to start service\n");
2676                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
2677                 ldlm_state->ldlm_cb_service = NULL;
2678                 GOTO(out, rc);
2679         }
2680
2681 #ifdef HAVE_SERVER_SUPPORT
        memset(&conf, 0, sizeof(conf));
        conf = (typeof(conf)) {
                .psc_name               = "ldlm_canceld",
                .psc_watchdog_factor    = 6,
                .psc_buf                = {
                        .bc_nbufs               = LDLM_NBUFS,
                        .bc_buf_size            = LDLM_BUFSIZE,
                        .bc_req_max_size        = LDLM_MAXREQSIZE,
                        .bc_rep_max_size        = LDLM_MAXREPSIZE,
                        .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
                        .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
                },
                .psc_thr                = {
                        .tc_thr_name            = "ldlm_cn",
                        .tc_thr_factor          = LDLM_THR_FACTOR,
                        .tc_nthrs_init          = LDLM_NTHRS_INIT,
                        .tc_nthrs_base          = LDLM_NTHRS_BASE,
                        .tc_nthrs_max           = LDLM_NTHRS_MAX,
                        .tc_nthrs_user          = ldlm_num_threads,
                        .tc_cpu_affinity        = 1,
                        .tc_ctx_tags            = LCT_MD_THREAD |
                                                  LCT_DT_THREAD |
                                                  LCT_CL_THREAD,
                },
                .psc_cpt                = {
                        .cc_pattern             = ldlm_cpts,
                },
                .psc_ops                = {
                        .so_req_handler         = ldlm_cancel_handler,
                        .so_hpreq_handler       = ldlm_hpreq_handler,
                },
        };
        ldlm_state->ldlm_cancel_service =
                        ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
        if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
                rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
                CERROR("failed to start ldlm_canceld service: %d\n", rc);
                ldlm_state->ldlm_cancel_service = NULL;
                GOTO(out, rc);
        }
#endif

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        cfs_spin_lock_init(&blp->blp_lock);
        CFS_INIT_LIST_HEAD(&blp->blp_list);
        CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
        cfs_waitq_init(&blp->blp_waitq);
        cfs_atomic_set(&blp->blp_num_threads, 0);
        cfs_atomic_set(&blp->blp_busy_threads, 0);

#ifdef __KERNEL__
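        /*
         * Size the blocking-callback thread pool: with no explicit
         * ldlm_num_threads the pool starts at LDLM_NTHRS_INIT threads and
         * may grow on demand up to LDLM_NTHRS_MAX; an explicit value is
         * clamped to [LDLM_NTHRS_INIT, LDLM_NTHRS_MAX] and fixes the pool
         * size (min == max).
         */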
        if (ldlm_num_threads == 0) {
                blp->blp_min_threads = LDLM_NTHRS_INIT;
                blp->blp_max_threads = LDLM_NTHRS_MAX;
        } else {
                blp->blp_min_threads = blp->blp_max_threads =
                        min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
                                                         ldlm_num_threads));
        }

        for (i = 0; i < blp->blp_min_threads; i++) {
                rc = ldlm_bl_thread_start(blp);
                if (rc < 0)
                        GOTO(out, rc);
        }

# ifdef HAVE_SERVER_SUPPORT
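        /*
         * Server-side AST timeout machinery: locks with outstanding
         * blocking ASTs sit on waiting_locks_list; waiting_locks_callback()
         * fires from waiting_locks_timer when a client is overdue and
         * hands the lock to the expired-lock thread (expired_lock_main),
         * which evicts the unresponsive client.
         */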
        CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_init(&expired_lock_thread.elt_waitq);

        CFS_INIT_LIST_HEAD(&waiting_locks_list);
        cfs_spin_lock_init(&waiting_locks_spinlock);
        cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);

        rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out, rc);
        }

        cfs_wait_event(expired_lock_thread.elt_waitq,
                       expired_lock_thread.elt_state == ELT_READY);
# endif /* HAVE_SERVER_SUPPORT */

        rc = ldlm_pools_init();
        if (rc) {
                CERROR("Failed to initialize LDLM pools: %d\n", rc);
                GOTO(out, rc);
        }
#endif
        RETURN(0);

 out:
        ldlm_cleanup();
        RETURN(rc);
}

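/*
 * Tear down everything ldlm_setup() created, in reverse dependency order:
 * the LDLM pools first, then the blocking-callback threads, the PTLRPC
 * services, the /proc entries and finally the expired-lock thread.
 * Fails with -EBUSY while any namespace still exists, since live
 * namespaces may still queue work for the threads being stopped.
 */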
static int ldlm_cleanup(void)
{
        ENTRY;

        if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
            !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        ldlm_pools_fini();

        if (ldlm_state->ldlm_bl_pool != NULL) {
                struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

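                /*
                 * Stop the pool one thread at a time: a work item with
                 * blwi_ns == NULL tells ldlm_bl_thread_main() to exit,
                 * and the departing thread completes blp_comp, so the
                 * next "poison" item is queued only once the previous
                 * thread is really gone.
                 */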
                while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
                        struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                        cfs_init_completion(&blp->blp_comp);

                        cfs_spin_lock(&blp->blp_lock);
                        cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                        cfs_waitq_signal(&blp->blp_waitq);
                        cfs_spin_unlock(&blp->blp_lock);

                        cfs_wait_for_completion(&blp->blp_comp);
                }

                OBD_FREE(blp, sizeof(*blp));
        }
#endif /* __KERNEL__ */

        if (ldlm_state->ldlm_cb_service != NULL)
                ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#ifdef HAVE_SERVER_SUPPORT
        if (ldlm_state->ldlm_cancel_service != NULL)
                ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif

#ifdef __KERNEL__
        ldlm_proc_cleanup();

# ifdef HAVE_SERVER_SUPPORT
        if (expired_lock_thread.elt_state != ELT_STOPPED) {
                expired_lock_thread.elt_state = ELT_TERMINATE;
                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
                cfs_wait_event(expired_lock_thread.elt_waitq,
                               expired_lock_thread.elt_state == ELT_STOPPED);
        }
# endif
#endif /* __KERNEL__ */

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}

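/*
 * One-time module initialization: set up the global mutexes and create
 * the slab caches backing LDLM resources, locks and interval nodes.  On
 * failure, each path unwinds the caches created before it.  Note that
 * ldlm_lock_slab is created with CFS_SLAB_DESTROY_BY_RCU, so its memory
 * is only returned to the kernel after an RCU grace period; see the
 * matching synchronize_rcu() in ldlm_exit().
 */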
int ldlm_init(void)
{
        cfs_mutex_init(&ldlm_ref_mutex);
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
        ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
                              sizeof(struct ldlm_lock), 0,
                              CFS_SLAB_HWCACHE_ALIGN | CFS_SLAB_DESTROY_BY_RCU);
        if (ldlm_lock_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        ldlm_interval_slab = cfs_mem_cache_create("interval_node",
                                        sizeof(struct ldlm_interval),
                                        0, CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_interval_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                cfs_mem_cache_destroy(ldlm_lock_slab);
                return -ENOMEM;
        }
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        class_export_dump_hook = ldlm_dump_export_locks;
#endif
        return 0;
}

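/*
 * Module-unload-time teardown, mirroring ldlm_init().  A non-zero
 * ldlm_refcount here means the module is going away while the LDLM
 * services are still referenced; report it rather than crash.
 */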
void ldlm_exit(void)
{
        int rc;

        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        rc = cfs_mem_cache_destroy(ldlm_resource_slab);
        LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
#ifdef __KERNEL__
        /* ldlm_lock_put() frees locks via an RCU callback
         * (ldlm_lock_free()), so wait for a grace period to elapse and
         * let any pending callbacks run before destroying the slab. */
        synchronize_rcu();
#endif
        rc = cfs_mem_cache_destroy(ldlm_lock_slab);
        LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
        rc = cfs_mem_cache_destroy(ldlm_interval_slab);
        LASSERTF(rc == 0, "couldn't free interval node slab\n");
}

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(ldlm_get_processing_policy);
#endif
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_addref_try);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
EXPORT_SYMBOL(ldlm_lock_allow_match);
EXPORT_SYMBOL(ldlm_lock_downgrade);
EXPORT_SYMBOL(ldlm_lock_convert);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast_async);
EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_blocking_ast);
EXPORT_SYMBOL(ldlm_glimpse_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
EXPORT_SYMBOL(ldlm_prep_elc_req);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
EXPORT_SYMBOL(ldlm_cli_enqueue_local);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
EXPORT_SYMBOL(ldlm_cli_cancel_req);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_resource_iterate);
EXPORT_SYMBOL(ldlm_cancel_resource_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list);

/* ldlm_lockd.c */
#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_glimpse_locks);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_enqueue0);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_request_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_handle_convert0);
EXPORT_SYMBOL(ldlm_revoke_export_locks);
#endif
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);
EXPORT_SYMBOL(ldlm_refresh_waiting_lock);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_import_add_conn);
EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_pack_pool_reply);

#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(server_disconnect_export);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
#endif

/* l_lock.c */
EXPORT_SYMBOL(lock_res_and_lock);
EXPORT_SYMBOL(unlock_res_and_lock);