LU-1876 ldlm: extend ldlm_valblock_ops{}
[fs/lustre-release.git] lustre/ldlm/ldlm_lockd.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lockd.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_class.h>
#include <libcfs/list.h>
#include "ldlm_internal.h"

static int ldlm_num_threads;
CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
                "number of DLM service threads to start");

static char *ldlm_cpts;
CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
                "CPU partitions ldlm threads should run on");

extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
static cfs_mutex_t      ldlm_ref_mutex;
static int ldlm_refcount;

struct ldlm_cb_async_args {
        struct ldlm_cb_set_arg *ca_set_arg;
        struct ldlm_lock       *ca_lock;
};

/* LDLM state */

static struct ldlm_state *ldlm_state;

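/* Round a timeout up to the next whole second, so that locks whose
 * deadlines fall within the same second share a single timer firing
 * (see the flood-avoidance note above __ldlm_add_waiting_lock() below).
 * Illustrative example, assuming a jiffies-based cfs_time_t: a deadline
 * 2.3s away truncates to 2 via cfs_duration_sec() and rounds up to 3s. */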
inline cfs_time_t round_timeout(cfs_time_t timeout)
{
        return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}

/* timeout for initial callback (AST) reply (bz10399) */
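/* Worked example, assuming the common defaults of obd_timeout = 100s and
 * ldlm_timeout = 20s (both are site-tunable, so this is only illustrative):
 * min(20, 100 / 3) = 20, which is already >= 1, so 20s is used. */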
static inline unsigned int ldlm_get_rq_timeout(void)
{
        /* Non-AT value */
        unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);

        return timeout < 1 ? 1 : timeout;
}

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

struct ldlm_bl_pool {
        cfs_spinlock_t          blp_lock;

        /*
         * blp_prio_list is used for callbacks that should be handled
         * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
         * see bug 13843
         */
        cfs_list_t              blp_prio_list;

        /*
         * blp_list is used for all other callbacks which are likely
         * to take longer to process.
         */
        cfs_list_t              blp_list;

        cfs_waitq_t             blp_waitq;
        cfs_completion_t        blp_comp;
        cfs_atomic_t            blp_num_threads;
        cfs_atomic_t            blp_busy_threads;
        int                     blp_min_threads;
        int                     blp_max_threads;
};

struct ldlm_bl_work_item {
        cfs_list_t              blwi_entry;
        struct ldlm_namespace  *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock       *blwi_lock;
        cfs_list_t              blwi_head;
        int                     blwi_count;
        cfs_completion_t        blwi_comp;
        int                     blwi_mode;
        int                     blwi_mem_pressure;
};

#if defined(HAVE_SERVER_SUPPORT) && defined(__KERNEL__)

/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
static cfs_list_t waiting_locks_list;
static cfs_timer_t waiting_locks_timer;

static struct expired_lock_thread {
        cfs_waitq_t             elt_waitq;
        int                     elt_state;
        int                     elt_dump;
        cfs_list_t              elt_expired_locks;
} expired_lock_thread;

static inline int have_expired_locks(void)
{
        int need_to_run;

        ENTRY;
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        RETURN(need_to_run);
}

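/* Main loop of the "ldlm_elt" (expired lock) thread.  It sleeps until
 * waiting_locks_callback() queues timed-out locks on elt_expired_locks,
 * then, for each expired lock, sanity-checks the lock and its export
 * against freed memory (LP_POISON), fails the export so the unresponsive
 * client is evicted, and drops the list's lock reference.  It also dumps
 * the debug log when elt_dump is set, or on eviction when requested. */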
static int expired_lock_main(void *arg)
{
        cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        int do_dump;

        ENTRY;
        cfs_daemonize("ldlm_elt");

        expired_lock_thread.elt_state = ELT_READY;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                cfs_spin_lock_bh(&waiting_locks_spinlock);
                if (expired_lock_thread.elt_dump) {
                        struct libcfs_debug_msg_data msgdata = {
                                .msg_file = __FILE__,
                                .msg_fn = "waiting_locks_callback",
                                .msg_line = expired_lock_thread.elt_dump };
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        /* from waiting_locks_callback, but not in timer */
                        libcfs_debug_dumplog();
                        libcfs_run_lbug_upcall(&msgdata);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                        expired_lock_thread.elt_dump = 0;
                }

                do_dump = 0;

                while (!cfs_list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = cfs_list_entry(expired->next, struct ldlm_lock,
                                              l_pending_chain);
                        if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                                CERROR("free lock on elt list %p\n", lock);
                                LBUG();
                        }
                        cfs_list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                /* release extra ref grabbed by
                                 * ldlm_add_waiting_lock() or
                                 * ldlm_failed_ast() */
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }

                        if (lock->l_destroyed) {
                                /* release the lock refcount that
                                 * waiting_locks_callback() found */
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }
                        export = class_export_lock_get(lock->l_export, lock);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        do_dump++;
                        class_fail_export(export);
                        class_export_lock_put(export, lock);

                        /* release extra ref grabbed by ldlm_add_waiting_lock()
                         * or ldlm_failed_ast() */
                        LDLM_LOCK_RELEASE(lock);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                }
                cfs_spin_unlock_bh(&waiting_locks_spinlock);

                if (do_dump && obd_dump_on_eviction) {
                        CERROR("dump the log upon eviction\n");
                        libcfs_debug_dumplog();
                }

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        RETURN(0);
}

static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds);

/**
 * Check if there is a request in the export request list
 * which prevents the lock from being cancelled.
 */
static int ldlm_lock_busy(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        int match = 0;
        ENTRY;

        if (lock->l_export == NULL)
                return 0;

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                if (req->rq_ops->hpreq_lock_match) {
                        match = req->rq_ops->hpreq_lock_match(req, lock);
                        if (match)
                                break;
                }
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        RETURN(match);
}

/* This is called from within a timer interrupt and cannot schedule */
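/* It scans waiting_locks_list in FIFO order (earliest deadline first, see
 * __ldlm_add_waiting_lock()) and stops at the first lock that has not yet
 * timed out, or at a GROUP lock, which is allowed to wait indefinitely. */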
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock        *lock;
        int                     need_dump = 0;

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        while (!cfs_list_empty(&waiting_locks_list)) {
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                if (cfs_time_after(lock->l_callback_timeout,
                                   cfs_time_current()) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                if (ptlrpc_check_suspend()) {
                        /* there is a case where we talk to one MDS while
                         * holding a lock from another MDS; we can easily get
                         * here if that second MDS is being recovered, so
                         * suspend timeouts. bug 6019 */

                        LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        if (lock->l_destroyed) {
                                /* relay the lock refcount decrease to the
                                 * expired lock thread */
                                cfs_list_add(&lock->l_pending_chain,
                                        &expired_lock_thread.elt_expired_locks);
                        } else {
                                __ldlm_add_waiting_lock(lock,
                                                ldlm_get_enq_timeout(lock));
                        }
                        continue;
                }

                /* if the timeout overlaps the activation time of suspended
                 * timeouts then extend it to give the client a chance to
                 * reconnect */
                if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
                                                 cfs_time_seconds(obd_timeout)/2),
                                    ptlrpc_suspend_wakeup_time())) {
                        LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        if (lock->l_destroyed) {
                                /* relay the lock refcount decrease to the
                                 * expired lock thread */
                                cfs_list_add(&lock->l_pending_chain,
                                        &expired_lock_thread.elt_expired_locks);
                        } else {
                                __ldlm_add_waiting_lock(lock,
                                                ldlm_get_enq_timeout(lock));
                        }
                        continue;
                }

                /* Check if we need to prolong the timeout */
                if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
                    ldlm_lock_busy(lock)) {
                        int cont = 1;

                        if (lock->l_pending_chain.next == &waiting_locks_list)
                                cont = 0;

                        LDLM_LOCK_GET(lock);

                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        LDLM_DEBUG(lock, "prolong the busy lock");
                        ldlm_refresh_waiting_lock(lock,
                                                  ldlm_get_enq_timeout(lock));
                        cfs_spin_lock_bh(&waiting_locks_spinlock);

                        if (!cont) {
                                LDLM_LOCK_RELEASE(lock);
                                break;
                        }

                        LDLM_LOCK_RELEASE(lock);
                        continue;
                }
                ldlm_lock_to_ns(lock)->ns_timeouts++;
                LDLM_ERROR(lock, "lock callback timer expired after %lds: "
                           "evicting client at %s ",
                           cfs_time_current_sec() - lock->l_last_activity,
                           libcfs_nid2str(
                                   lock->l_export->exp_connection->c_peer.nid));

                /* no need to take an extra ref on the lock since it was in
                 * the waiting_locks_list and ldlm_add_waiting_lock()
                 * already grabbed a ref */
                cfs_list_del(&lock->l_pending_chain);
                cfs_list_add(&lock->l_pending_chain,
                             &expired_lock_thread.elt_expired_locks);
                need_dump = 1;
        }

        if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
                if (obd_dump_on_timeout && need_dump)
                        expired_lock_thread.elt_dump = __LINE__;

                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!cfs_list_empty(&waiting_locks_list)) {
                cfs_time_t timeout_rounded;
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
}

/*
 * Indicate that we're waiting for a client to call us back to cancel a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic.)
 * As in ldlm_add_waiting_lock(), the caller must take a lock reference
 * whenever the lock has been added to the waiting list (1 is returned).
 *
 * Called with the namespace lock held.
 */
static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
{
        cfs_time_t timeout;
        cfs_time_t timeout_rounded;

        if (!cfs_list_empty(&lock->l_pending_chain))
                return 0;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
            OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
                seconds = 1;

        timeout = cfs_time_shift(seconds);
        if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
                lock->l_callback_timeout = timeout;

        timeout_rounded = round_timeout(lock->l_callback_timeout);

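        /* Re-arm the timer only if this lock's rounded deadline is earlier
         * than the currently armed one (or the timer is idle).  For example,
         * with the timer armed for t+10s, adding a lock that times out at
         * t+3s pulls the deadline in, while one at t+15s leaves it alone. */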
        if (cfs_time_before(timeout_rounded,
                            cfs_timer_deadline(&waiting_locks_timer)) ||
            !cfs_timer_is_armed(&waiting_locks_timer)) {
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        /* if the new lock has a shorter timeout than something earlier on
           the list, we'll wait the longer amount of time; no big deal. */
        /* FIFO */
        cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
        return 1;
}

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        int ret;
        int timeout = ldlm_get_enq_timeout(lock);

        /* NB: must be called with lock_res_and_lock() held */
        LASSERT(lock->l_res_locked);
        lock->l_waited = 1;

        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (lock->l_destroyed) {
                static cfs_time_t next;
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
                if (cfs_time_after(cfs_time_current(), next)) {
                        next = cfs_time_shift(14400);
                        libcfs_debug_dumpstack(NULL);
                }
                return 0;
        }

        ret = __ldlm_add_waiting_lock(lock, timeout);
        if (ret) {
                /* grab a ref on the lock if it has been added to the
                 * waiting list */
                LDLM_LOCK_GET(lock);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        if (ret) {
                cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
                if (cfs_list_empty(&lock->l_exp_list))
                        cfs_list_add(&lock->l_exp_list,
                                     &lock->l_export->exp_bl_list);
                cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
        }

        LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
                   ret == 0 ? "not re-" : "", timeout,
                   AT_OFF ? "off" : "on");
        return ret;
}

/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 * As in ldlm_del_waiting_lock(), the caller must release the lock
 * reference whenever the lock is removed from any list (1 is returned).
 *
 * Called with the namespace lock held.
 */
static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        cfs_list_t *list_next;

        if (cfs_list_empty(&lock->l_pending_chain))
                return 0;

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list; adjust the timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more locks left; just cancel the timer. */
                        cfs_timer_disarm(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = cfs_list_entry(list_next, struct ldlm_lock,
                                              l_pending_chain);
                        cfs_timer_arm(&waiting_locks_timer,
                                      round_timeout(next->l_callback_timeout));
                }
        }
        cfs_list_del_init(&lock->l_pending_chain);

        return 1;
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        int ret;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        ret = __ldlm_del_waiting_lock(lock);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        /* remove the lock from the export's blocking list */
        cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
        cfs_list_del_init(&lock->l_exp_list);
        cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);

        if (ret) {
                /* release the lock ref if it has indeed been removed
                 * from a list */
                LDLM_LOCK_RELEASE(lock);
        }

        LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
        return ret;
}

/*
 * Prolong the lock
 *
 * Called with the namespace lock held.
 */
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);

        if (cfs_list_empty(&lock->l_pending_chain)) {
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        /* we remove/add the lock to the waiting list, so no need to
         * release/take a lock reference */
        __ldlm_del_waiting_lock(lock);
        __ldlm_add_waiting_lock(lock, timeout);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        LDLM_DEBUG(lock, "refreshed");
        return 1;
}

#else /* !HAVE_SERVER_SUPPORT || !__KERNEL__ */

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        RETURN(0);
}

# ifdef HAVE_SERVER_SUPPORT
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        LASSERT(lock->l_res_locked);
        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
        RETURN(1);
}

# endif
#endif /* HAVE_SERVER_SUPPORT && __KERNEL__ */

#ifdef HAVE_SERVER_SUPPORT

static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                            const char *ast_type)
{
        LCONSOLE_ERROR_MSG(0x138, "%s: A client on nid %s was evicted due "
                           "to a lock %s callback timeout: rc %d\n",
                           lock->l_export->exp_obd->obd_name,
                           obd_export_nid2str(lock->l_export), ast_type, rc);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();
#ifdef __KERNEL__
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (__ldlm_del_waiting_lock(lock) == 0)
                /* the lock was not in any list; grab an extra ref before
                 * adding the lock to the expired list */
                LDLM_LOCK_GET(lock);
        cfs_list_add(&lock->l_pending_chain,
                     &expired_lock_thread.elt_expired_locks);
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
#else
        class_fail_export(lock->l_export);
#endif
}

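/* Classify an error from sending an AST.  Timeouts and connection failures
 * normally evict the client via ldlm_failed_ast(); liblustre clients, and
 * locks for which a cancel has already arrived, are simply cancelled.
 * -EINVAL is a normal race (the client no longer knows the lock), so the
 * LVB is refreshed and the lock cancelled.  Returns -ERESTART whenever the
 * caller's reprocessing loop should be restarted. */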
static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else if (lock->l_flags & LDLM_FL_CANCEL) {
                        LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
                                   "cancel was received (AST reply lost?)",
                                   ast_type, libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        ldlm_del_waiting_lock(lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL) {
                        struct ldlm_resource *res = lock->l_resource;
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                               " from %s AST - normal race",
                               libcfs_nid2str(peer.nid),
                               req->rq_repmsg ?
                               lustre_msg_get_status(req->rq_repmsg) : -1,
                               ast_type);
                        if (res) {
                                /* update lvbo to return proper attributes.
                                 * see bug 23174 */
                                ldlm_resource_getref(res);
                                ldlm_res_lvbo_update(res, NULL, 1);
                                ldlm_resource_putref(res);
                        }

                } else {
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", libcfs_nid2str(peer.nid),
                                   (req->rq_repmsg != NULL) ?
                                   lustre_msg_get_status(req->rq_repmsg) : 0,
                                   ast_type);
                }
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}

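/* Reply interpreter shared by the blocking, completion, and glimpse ASTs
 * sent below.  It dispatches on arg->type, funnels failures through
 * ldlm_handle_ast_error(), drops the extra lock reference taken when the
 * request joined the set in ldlm_ast_fini(), and bumps arg->restart when
 * -ERESTART is the result. */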
static int ldlm_cb_interpret(const struct lu_env *env,
                             struct ptlrpc_request *req, void *data, int rc)
{
        struct ldlm_cb_async_args *ca   = data;
        struct ldlm_lock          *lock = ca->ca_lock;
        struct ldlm_cb_set_arg    *arg  = ca->ca_set_arg;
        ENTRY;

        LASSERT(lock != NULL);

        switch (arg->type) {
        case LDLM_GL_CALLBACK:
                /* Update the LVB from disk if the AST failed
                 * (this is a legal race)
                 *
                 * - Glimpse callback of local lock just returns
                 *   -ELDLM_NO_LOCK_DATA.
                 * - Glimpse callback of remote lock might return
                 *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
                 */
                if (rc == -ELDLM_NO_LOCK_DATA) {
                        LDLM_DEBUG(lock, "lost race - client has a lock but no "
                                   "inode");
                        ldlm_res_lvbo_update(lock->l_resource, NULL, 1);
                } else if (rc != 0) {
                        rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
                } else {
                        rc = ldlm_res_lvbo_update(lock->l_resource, req, 1);
                }
                break;
        case LDLM_BL_CALLBACK:
                if (rc != 0)
                        rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
                break;
        case LDLM_CP_CALLBACK:
                if (rc != 0)
                        rc = ldlm_handle_ast_error(lock, req, rc, "completion");
                break;
        default:
                LDLM_ERROR(lock, "invalid opcode for lock callback %d",
                           arg->type);
                LBUG();
        }

        /* release extra reference taken in ldlm_ast_fini() */
        LDLM_LOCK_RELEASE(lock);

        if (rc == -ERESTART)
                cfs_atomic_inc(&arg->restart);

        RETURN(0);
}

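/* Dispatch a prepared AST request.  On the instant-cancel path the RPC is
 * sent immediately and the request is finished here, with a successful
 * send counted in arg->restart; otherwise the request joins arg->set and
 * an extra lock reference is taken, to be dropped in ldlm_cb_interpret(). */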
static inline int ldlm_ast_fini(struct ptlrpc_request *req,
                                struct ldlm_cb_set_arg *arg,
                                struct ldlm_lock *lock,
                                int instant_cancel)
{
        int rc = 0;
        ENTRY;

        if (unlikely(instant_cancel)) {
                rc = ptl_send_rpc(req, 1);
                ptlrpc_req_finished(req);
                if (rc == 0)
                        cfs_atomic_inc(&arg->restart);
        } else {
                LDLM_LOCK_GET(lock);
                ptlrpc_set_add_req(arg->set, req);
        }

        RETURN(rc);
}

/**
 * Check if there are requests in the export request list which prevent
 * the lock canceling, and make those requests high priority ones.
 */
static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        ENTRY;

        if (lock->l_export == NULL) {
                LDLM_DEBUG(lock, "client lock: no-op");
                RETURN_EXIT;
        }

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                /* Do not process requests that have not yet been added to
                 * the incoming queue, or that have already been removed
                 * from it for processing */
                if (!req->rq_hp && !cfs_list_empty(&req->rq_list) &&
                    req->rq_ops->hpreq_lock_match &&
                    req->rq_ops->hpreq_lock_match(req, lock))
                        ptlrpc_hpreq_reorder(req);
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        EXIT;
}

/*
 * ->l_blocking_ast() method for server-side locks. This is invoked when a
 * newly enqueued server lock conflicts with the given one.
 *
 * Sends a blocking AST RPC to the client owning that lock; arms the timeout
 * timer to wait for the client's response.
 */
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_cb_async_args *ca;
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        int                     instant_cancel = 0;
        int                     rc = 0;
        ENTRY;

        if (flag == LDLM_CB_CANCELING)
                /* Don't need to do anything here. */
                RETURN(0);

        LASSERT(lock);
        LASSERT(data != NULL);
        if (lock->l_export->exp_obd->obd_recovering != 0)
                LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");

        ldlm_lock_reorder_req(lock);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_BL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;

        lock_res_and_lock(lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                unlock_res_and_lock(lock);

                ptlrpc_req_finished(req);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                unlock_res_and_lock(lock);
                ptlrpc_req_finished(req);
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
                instant_cancel = 1;

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_desc = *desc;
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");

        ptlrpc_request_set_replen(req);
        if (instant_cancel) {
                unlock_res_and_lock(lock);
                ldlm_lock_cancel(lock);
        } else {
                LASSERT(lock->l_granted_mode == lock->l_req_mode);
                ldlm_add_waiting_lock(lock);
                unlock_res_and_lock(lock);
        }

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set the timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_BL_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_ast_fini(req, arg, lock, instant_cancel);

        RETURN(rc);
}

int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        struct ldlm_cb_async_args *ca;
        long                    total_enqueue_wait;
        int                     instant_cancel = 0;
        int                     rc = 0;
        int                     lvb_len;
        ENTRY;

        LASSERT(lock != NULL);
        LASSERT(data != NULL);

        total_enqueue_wait = cfs_time_sub(cfs_time_current_sec(),
                                          lock->l_last_activity);

        req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
                                   &RQF_LDLM_CP_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* server namespace, doesn't need lock */
        lvb_len = ldlm_lvbo_size(lock);
        if (lvb_len > 0)
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
                                     lvb_len);

        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);

        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);
        if (lvb_len > 0) {
                void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);

                lvb_len = ldlm_lvbo_fill(lock, lvb, lvb_len);
                req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
                                   lvb_len, RCL_CLIENT);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
                   total_enqueue_wait);

        /* Server-side enqueue wait time estimate, used in
         * __ldlm_add_waiting_lock to set future enqueue timers */
        if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
                at_measured(ldlm_lock_to_ns_at(lock),
                            total_enqueue_wait);
        else
                /* bz18618. Don't add lock enqueue time we spend waiting for a
                 * previous callback to fail. Locks waiting legitimately will
                 * get extended by ldlm_refresh_waiting_lock regardless of the
                 * estimate, so it's okay to underestimate here. */
                LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
                           "It is likely that a previous callback timed out.",
                           total_enqueue_wait,
                           at_get(ldlm_lock_to_ns_at(lock)));

        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_pack already set the timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        /* We only send real blocking ASTs after the lock is granted */
        lock_res_and_lock(lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                /* copy AST flags like LDLM_FL_DISCARD_DATA */
                body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

                /* We might get here prior to ldlm_handle_enqueue setting
                 * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
                 * into the waiting list, but this is safe and similar code in
                 * ldlm_handle_enqueue will call ldlm_lock_cancel() still,
                 * which would not only cancel the lock, but will also remove
                 * it from the waiting list */
                if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                        unlock_res_and_lock(lock);
                        ldlm_lock_cancel(lock);
                        instant_cancel = 1;
                        lock_res_and_lock(lock);
                } else {
                        /* start the lock-timeout clock */
                        ldlm_add_waiting_lock(lock);
                }
        }
        unlock_res_and_lock(lock);

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CP_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_ast_fini(req, arg, lock, instant_cancel);

        RETURN(rc);
}

int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_cb_set_arg          *arg = data;
        struct ldlm_request             *body;
        struct ptlrpc_request           *req;
        struct ldlm_cb_async_args       *ca;
        int                              rc;
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_GL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK);

        if (req == NULL)
                RETURN(-ENOMEM);

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        ldlm_lock2desc(lock, &body->lock_desc);

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        /* server namespace, doesn't need lock */
        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             ldlm_lvbo_size(lock));
        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set the timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        req->rq_interpret_reply = ldlm_cb_interpret;

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_GL_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_ast_fini(req, arg, lock, 0);

        RETURN(rc);
}

int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list)
{
        int     rc;
        ENTRY;

        rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
                               LDLM_WORK_GL_AST);
        if (rc == -ERESTART)
                ldlm_reprocess_all(res);

        RETURN(rc);
}

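/* Map an incoming enqueue onto the matching "extra" lprocfs counter (the
 * slots after PTLRPC_LAST_CNTR): plain, extent (split into glimpse vs.
 * plain extent enqueues by LDLM_FL_HAS_INTENT), flock, or ibits. */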
static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
                              struct lprocfs_stats *srv_stats)
{
        int lock_type = 0, op = 0;

        lock_type = dlm_req->lock_desc.l_resource.lr_type;

        switch (lock_type) {
        case LDLM_PLAIN:
                op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
                break;
        case LDLM_EXTENT:
                if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
                        op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
                else
                        op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
                break;
        case LDLM_FLOCK:
                op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
                break;
        case LDLM_IBITS:
                op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
                break;
        default:
                op = 0;
                break;
        }

        if (op)
                lprocfs_counter_incr(srv_stats, op);

        return;
}

/*
 * Main server-side entry point into LDLM. This is called by ptlrpc service
 * threads to carry out client lock enqueueing requests.
 */
int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
                         struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs)
{
        struct ldlm_reply *dlm_rep;
        __u32 flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        int rc = 0;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
        flags = dlm_req->lock_flags;

        LASSERT(req->rq_export);

        if (ptlrpc_req2svc(req)->srv_stats != NULL)
                ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_ENQUEUE - LDLM_FIRST_OPC);

        if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
                     dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
                          dlm_req->lock_desc.l_resource.lr_type);
                GOTO(out, rc = -EFAULT);
        }

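        /* Valid lock modes are single bits (LCK_EX, LCK_PW, ...), so reject
         * any mode that is out of range or has more than one bit set; for a
         * power of two m, m & (m - 1) == 0: e.g. 4 & 3 == 0, but 6 & 5 == 4. */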
        if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
                     dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
                     dlm_req->lock_desc.l_req_mode &
                     (dlm_req->lock_desc.l_req_mode - 1))) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
                          dlm_req->lock_desc.l_req_mode);
                GOTO(out, rc = -EFAULT);
        }

        if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
                if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                             LDLM_PLAIN)) {
                        DEBUG_REQ(D_ERROR, req,
                                  "PLAIN lock request from IBITS client?");
                        GOTO(out, rc = -EPROTO);
                }
        } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                            LDLM_IBITS)) {
                DEBUG_REQ(D_ERROR, req,
                          "IBITS lock request from unaware client?");
                GOTO(out, rc = -EPROTO);
        }

#if 0
        /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
           against server's _CONNECT_SUPPORTED flags? (I don't want to use
           ibits for mgc/mgs) */

        /* INODEBITS_INTEROP: Perform conversion from plain lock to
         * inodebits lock if client does not support them. */
        if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
            (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
                dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
                dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
                        MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
                if (dlm_req->lock_desc.l_req_mode == LCK_PR)
                        dlm_req->lock_desc.l_req_mode = LCK_CR;
        }
#endif

        if (unlikely(flags & LDLM_FL_REPLAY)) {
                /* Find an existing lock in the per-export lock hash */
                /* In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp() */
                /* coverity[overrun-buffer-val] */
                lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
                                       (void *)&dlm_req->lock_handle[0]);
                if (lock != NULL) {
                        DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
                                  LPX64, lock->l_handle.h_cookie);
                        GOTO(existing_lock, rc = 0);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                cbs, NULL, 0);

        if (!lock)
                GOTO(out, rc = -ENOMEM);

        lock->l_last_activity = cfs_time_current_sec();
        lock->l_remote_handle = dlm_req->lock_handle[0];
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
        /* Don't enqueue a lock onto the export if it has been disconnected
         * due to eviction (bug 3822) or server umount (bug 24324).
         * Cancel it now instead. */
        if (req->rq_export->exp_disconnected) {
                LDLM_ERROR(lock, "lock on disconnected export %p",
                           req->rq_export);
                GOTO(out, rc = -ENOTCONN);
        }

        lock->l_export = class_export_lock_get(req->rq_export, lock);
        if (lock->l_export->exp_lock_hash)
                cfs_hash_add(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle,
                             &lock->l_exp_hash);

existing_lock:

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                /* based on the assumption that the LVB size never changes
                 * during resource lifetime; otherwise it would need
                 * resource->lr_lock's protection */
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
                                     RCL_SERVER, ldlm_lvbo_size(lock));

                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = req_capsule_server_pack(&req->rq_pill);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                ldlm_convert_policy_to_local(req->rq_export,
                                          dlm_req->lock_desc.l_resource.lr_type,
                                          &dlm_req->lock_desc.l_policy_data,
                                          &lock->l_policy_data);
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                lock->l_req_extent = lock->l_policy_data.l_extent;

        err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
        if (err)
                GOTO(out, err);

        dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        lock_res_and_lock(lock);

        /* Now take into account flags to be inherited from the original lock
           request, both in the reply to the client and in our own lock
           flags. */
        dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
        lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;

        /* Don't move a pending lock onto the export if it has already been
         * disconnected due to eviction (bug 5683) or server umount (bug
         * 24324).  Cancel it now instead. */
        if (unlikely(req->rq_export->exp_disconnected ||
                     OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
                LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                rc = -ENOTCONN;
        } else if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode) {
                        /*
                         * Only cancel the lock if it was granted, because it
                         * would be destroyed immediately and would never be
                         * granted in the future, causing timeouts on the
                         * client.  A lock that was not granted will be
                         * cancelled immediately after sending the completion
                         * AST.
                         */
                        if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                                unlock_res_and_lock(lock);
                                ldlm_lock_cancel(lock);
                                lock_res_and_lock(lock);
                        } else
                                ldlm_add_waiting_lock(lock);
                }
        }
        /* Make sure we never ever grant usual metadata locks to liblustre
           clients */
        if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
            dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
             req->rq_export->exp_libclient) {
                if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
                             !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
                        CERROR("Granting sync lock to libclient. "
                               "req fl %d, rep fl %d, lock fl "LPX64"\n",
                               dlm_req->lock_flags, dlm_rep->lock_flags,
                               lock->l_flags);
                        LDLM_ERROR(lock, "sync lock");
                        if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
                                struct ldlm_intent *it;

                                it = req_capsule_client_get(&req->rq_pill,
                                                            &RMF_LDLM_INTENT);
                                if (it != NULL) {
                                        CERROR("This is intent %s ("LPU64")\n",
                                               ldlm_it2str(it->opc), it->opc);
                                }
                        }
                }
        }

        unlock_res_and_lock(lock);

        EXIT;
1327  out:
1328         req->rq_status = rc ?: err; /* return either error - bug 11190 */
1329         if (!req->rq_packed_final) {
1330                 err = lustre_pack_reply(req, 1, NULL, NULL);
1331                 if (rc == 0)
1332                         rc = err;
1333         }
1334
1335         /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1336          * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
1337         if (lock) {
1338                 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
1339                            "(err=%d, rc=%d)", err, rc);
1340
1341                 if (rc == 0) {
1342                         int lvb_len = ldlm_lvbo_size(lock);
1343
1344                         if (lvb_len > 0) {
1345                                 void *buf;
1346                                 int buflen;
1347
1348                                 buf = req_capsule_server_get(&req->rq_pill,
1349                                                              &RMF_DLM_LVB);
1350                                 LASSERTF(buf != NULL, "req %p, lock %p\n",
1351                                          req, lock);
1352                                 buflen = req_capsule_get_size(&req->rq_pill,
1353                                                 &RMF_DLM_LVB, RCL_SERVER);
1354                                 buflen = ldlm_lvbo_fill(lock, buf, buflen);
1355                                 req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
1356                                                    buflen, RCL_SERVER);
1357                         }
1358                 } else {
1359                         lock_res_and_lock(lock);
1360                         ldlm_resource_unlink_lock(lock);
1361                         ldlm_lock_destroy_nolock(lock);
1362                         unlock_res_and_lock(lock);
1363                 }
1364
1365                 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1366                         ldlm_reprocess_all(lock->l_resource);
1367
1368                 LDLM_LOCK_RELEASE(lock);
1369         }
1370
1371         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1372                           lock, rc);
1373
1374         return rc;
1375 }
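
/*
 * Note on the reply path above: rq_status carries either the enqueue
 * error (err) or the handler error (rc) -- bug 11190 -- and on success
 * the preallocated LVB buffer in the reply is shrunk to the length
 * actually filled by ldlm_lvbo_fill(), so the client never sees bytes
 * past the valid LVB data.
 */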
1376
1377 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1378                         ldlm_completion_callback completion_callback,
1379                         ldlm_blocking_callback blocking_callback,
1380                         ldlm_glimpse_callback glimpse_callback)
1381 {
1382         struct ldlm_request *dlm_req;
1383         struct ldlm_callback_suite cbs = {
1384                 .lcs_completion = completion_callback,
1385                 .lcs_blocking   = blocking_callback,
1386                 .lcs_glimpse    = glimpse_callback
1387         };
1388         int rc;
1389
1390         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1391         if (dlm_req != NULL) {
1392                 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1393                                           req, dlm_req, &cbs);
1394         } else {
1395                 rc = -EFAULT;
1396         }
1397         return rc;
1398 }
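
/*
 * A minimal caller sketch (hypothetical wiring, not code from this
 * file): a server-side target would typically pass its own AST
 * handlers through the callback suite, e.g.:
 *
 *      static int xxx_enqueue(struct ptlrpc_request *req)
 *      {
 *              return ldlm_handle_enqueue(req, ldlm_server_completion_ast,
 *                                         ldlm_server_blocking_ast,
 *                                         ldlm_server_glimpse_ast);
 *      }
 *
 * The xxx_enqueue name and the choice of ldlm_server_* callbacks here
 * are illustrative assumptions.
 */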
1399
1400 int ldlm_handle_convert0(struct ptlrpc_request *req,
1401                          const struct ldlm_request *dlm_req)
1402 {
1403         struct ldlm_reply *dlm_rep;
1404         struct ldlm_lock *lock;
1405         int rc;
1406         ENTRY;
1407
1408         if (req->rq_export && req->rq_export->exp_nid_stats &&
1409             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1410                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1411                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1412
1413         rc = req_capsule_server_pack(&req->rq_pill);
1414         if (rc)
1415                 RETURN(rc);
1416
1417         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1418         dlm_rep->lock_flags = dlm_req->lock_flags;
1419
1420         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1421         if (!lock) {
1422                 req->rq_status = EINVAL;
1423         } else {
1424                 void *res = NULL;
1425
1426                 LDLM_DEBUG(lock, "server-side convert handler START");
1427
1428                 lock->l_last_activity = cfs_time_current_sec();
1429                 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1430                                         &dlm_rep->lock_flags);
1431                 if (res) {
1432                         if (ldlm_del_waiting_lock(lock))
1433                                 LDLM_DEBUG(lock, "converted waiting lock");
1434                         req->rq_status = 0;
1435                 } else {
1436                         req->rq_status = EDEADLOCK;
1437                 }
1438         }
1439
1440         if (lock) {
1441                 if (!req->rq_status)
1442                         ldlm_reprocess_all(lock->l_resource);
1443                 LDLM_DEBUG(lock, "server-side convert handler END");
1444                 LDLM_LOCK_PUT(lock);
1445         } else
1446                 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1447
1448         RETURN(0);
1449 }
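
/*
 * Note: req->rq_status is deliberately set to a positive errno above
 * (EINVAL, EDEADLOCK); it is carried back to the client in the reply
 * rather than treated as a local failure, which is why the handler
 * itself still RETURNs 0.
 */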
1450
1451 int ldlm_handle_convert(struct ptlrpc_request *req)
1452 {
1453         int rc;
1454         struct ldlm_request *dlm_req;
1455
1456         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1457         if (dlm_req != NULL) {
1458                 rc = ldlm_handle_convert0(req, dlm_req);
1459         } else {
1460                 CERROR("Can't unpack dlm_req\n");
1461                 rc = -EFAULT;
1462         }
1463         return rc;
1464 }
1465
1466 /* Cancel all the locks whose handles are packed into the ldlm_request */
1467 int ldlm_request_cancel(struct ptlrpc_request *req,
1468                         const struct ldlm_request *dlm_req, int first)
1469 {
1470         struct ldlm_resource *res, *pres = NULL;
1471         struct ldlm_lock *lock;
1472         int i, count, done = 0;
1473         ENTRY;
1474
1475         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1476         if (first >= count)
1477                 RETURN(0);
1478
1479         /* There are no locks on the server at replay time; skip lock
1480          * cancellation so that replay tests pass. */
1481         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1482                 RETURN(0);
1483
1484         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1485                           "starting at %d", count, first);
1486
1487         for (i = first; i < count; i++) {
1488                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1489                 if (!lock) {
1490                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1491                                           "lock (cookie "LPU64")",
1492                                           dlm_req->lock_handle[i].cookie);
1493                         continue;
1494                 }
1495
1496                 res = lock->l_resource;
1497                 done++;
1498
1499                 if (res != pres) {
1500                         if (pres != NULL) {
1501                                 ldlm_reprocess_all(pres);
1502                                 LDLM_RESOURCE_DELREF(pres);
1503                                 ldlm_resource_putref(pres);
1504                         }
1505                         if (res != NULL) {
1506                                 ldlm_resource_getref(res);
1507                                 LDLM_RESOURCE_ADDREF(res);
1508                                 ldlm_res_lvbo_update(res, NULL, 1);
1509                         }
1510                         pres = res;
1511                 }
1512                 ldlm_lock_cancel(lock);
1513                 LDLM_LOCK_PUT(lock);
1514         }
1515         if (pres != NULL) {
1516                 ldlm_reprocess_all(pres);
1517                 LDLM_RESOURCE_DELREF(pres);
1518                 ldlm_resource_putref(pres);
1519         }
1520         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1521         RETURN(done);
1522 }
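
/*
 * The loop above batches work per resource: ldlm_reprocess_all() and
 * the LVB update run only when consecutive handles switch resources
 * (plus once at the end), so a cancel RPC packed with many locks on a
 * single resource pays the reprocessing cost once.
 */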
1523
1524 int ldlm_handle_cancel(struct ptlrpc_request *req)
1525 {
1526         struct ldlm_request *dlm_req;
1527         int rc;
1528         ENTRY;
1529
1530         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1531         if (dlm_req == NULL) {
1532                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1533                 RETURN(-EFAULT);
1534         }
1535
1536         if (req->rq_export && req->rq_export->exp_nid_stats &&
1537             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1538                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1539                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1540
1541         rc = req_capsule_server_pack(&req->rq_pill);
1542         if (rc)
1543                 RETURN(rc);
1544
1545         if (!ldlm_request_cancel(req, dlm_req, 0))
1546                 req->rq_status = ESTALE;
1547
1548         RETURN(ptlrpc_reply(req));
1549 }
1550 #endif /* HAVE_SERVER_SUPPORT */
1551
1552 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1553                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1554 {
1555         int do_ast;
1556         ENTRY;
1557
1558         LDLM_DEBUG(lock, "client blocking AST callback handler");
1559
1560         lock_res_and_lock(lock);
1561         lock->l_flags |= LDLM_FL_CBPENDING;
1562
1563         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1564                 lock->l_flags |= LDLM_FL_CANCEL;
1565
1566         do_ast = (!lock->l_readers && !lock->l_writers);
1567         unlock_res_and_lock(lock);
1568
1569         if (do_ast) {
1570                 CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
1571                        lock, lock->l_blocking_ast);
1572                 if (lock->l_blocking_ast != NULL)
1573                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1574                                              LDLM_CB_BLOCKING);
1575         } else {
1576                 CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
1577                        lock);
1578         }
1579
1580         LDLM_DEBUG(lock, "client blocking callback handler END");
1581         LDLM_LOCK_RELEASE(lock);
1582         EXIT;
1583 }
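
/*
 * The do_ast decision above is the heart of client-side blocking-AST
 * handling: a lock still held by readers or writers only gets
 * LDLM_FL_CBPENDING set, deferring the cancel until the last user
 * reference is dropped; an already-unused lock is cancelled right away
 * through its l_blocking_ast.
 */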
1584
1585 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1586                                     struct ldlm_namespace *ns,
1587                                     struct ldlm_request *dlm_req,
1588                                     struct ldlm_lock *lock)
1589 {
1590         int lvb_len;
1591         CFS_LIST_HEAD(ast_list);
1592         ENTRY;
1593
1594         LDLM_DEBUG(lock, "client completion callback handler START");
1595
1596         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1597                 int to = cfs_time_seconds(1);
1598                 while (to > 0) {
1599                         cfs_schedule_timeout_and_set_state(
1600                                 CFS_TASK_INTERRUPTIBLE, to);
1601                         if (lock->l_granted_mode == lock->l_req_mode ||
1602                             lock->l_destroyed)
1603                                 break;
1604                 }
1605         }
1606
1607         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1608         if (lvb_len > 0) {
1609                 if (lock->l_lvb_len > 0) {
1610                         /* for extent lock, lvb contains ost_lvb{}. */
1611                         LASSERT(lock->l_lvb_data != NULL);
1612                         LASSERTF(lock->l_lvb_len == lvb_len,
1613                                 "preallocated %d, actual %d.\n",
1614                                 lock->l_lvb_len, lvb_len);
1615                 } else { /* for layout lock, lvb has variable length */
1616                         void *lvb_data;
1617
1618                         OBD_ALLOC(lvb_data, lvb_len);
1619                         if (lvb_data == NULL)
1620                                 LDLM_ERROR(lock, "no memory.\n");
1621
1622                         lock_res_and_lock(lock);
1623                         if (lvb_data == NULL) {
1624                                 lock->l_flags |= LDLM_FL_FAILED;
1625                         } else {
1626                                 LASSERT(lock->l_lvb_data == NULL);
1627                                 lock->l_lvb_data = lvb_data;
1628                                 lock->l_lvb_len = lvb_len;
1629                         }
1630                         unlock_res_and_lock(lock);
1631                 }
1632         }
1633
1634         lock_res_and_lock(lock);
1635         if (lock->l_destroyed ||
1636             lock->l_granted_mode == lock->l_req_mode) {
1637                 /* bug 11300: the lock has already been granted */
1638                 unlock_res_and_lock(lock);
1639                 LDLM_DEBUG(lock, "Double grant race happened");
1640                 LDLM_LOCK_RELEASE(lock);
1641                 EXIT;
1642                 return;
1643         }
1644
1645         /* If we receive the completion AST before the actual enqueue returned,
1646          * then we might need to switch lock modes, resources, or extents. */
1647         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1648                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1649                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1650         }
1651
1652         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1653                 ldlm_convert_policy_to_local(req->rq_export,
1654                                           dlm_req->lock_desc.l_resource.lr_type,
1655                                           &dlm_req->lock_desc.l_policy_data,
1656                                           &lock->l_policy_data);
1657                 LDLM_DEBUG(lock, "completion AST, new policy data");
1658         }
1659
1660         ldlm_resource_unlink_lock(lock);
1661         if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1662                    &lock->l_resource->lr_name,
1663                    sizeof(lock->l_resource->lr_name)) != 0) {
1664                 unlock_res_and_lock(lock);
1665                 if (ldlm_lock_change_resource(ns, lock,
1666                                 &dlm_req->lock_desc.l_resource.lr_name) != 0) {
1667                         LDLM_ERROR(lock, "Failed to allocate resource");
1668                         LDLM_LOCK_RELEASE(lock);
1669                         EXIT;
1670                         return;
1671                 }
1672                 LDLM_DEBUG(lock, "completion AST, new resource");
1673                 CERROR("change resource!\n");
1674                 lock_res_and_lock(lock);
1675         }
1676
1677         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1678                 /* BL_AST locks are not needed in the lru;
1679                  * let ldlm_cancel_lru() be fast. */
1680                 ldlm_lock_remove_from_lru(lock);
1681                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1682                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1683         }
1684
1685         if (lock->l_lvb_len) {
1686                 if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
1687                                          RCL_CLIENT) < lock->l_lvb_len) {
1688                         LDLM_ERROR(lock, "completion AST did not contain "
1689                                    "expected LVB!");
1690                 } else {
1691                         void *lvb = req_capsule_client_get(&req->rq_pill,
1692                                                            &RMF_DLM_LVB);
1693                         memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1694                 }
1695         }
1696
1697         ldlm_grant_lock(lock, &ast_list);
1698         unlock_res_and_lock(lock);
1699
1700         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1701
1702         /* Let the enqueue path call osc_lock_upcall() and initialize
1703          * l_ast_data */
1704         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1705
1706         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1707
1708         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1709                           lock);
1710         LDLM_LOCK_RELEASE(lock);
1711         EXIT;
1712 }
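
/*
 * LVB handling in the completion path distinguishes two cases: extent
 * locks arrive with a fixed-size, preallocated ost_lvb{} buffer, while
 * layout locks allocate l_lvb_data on the fly from the size packed in
 * the request capsule.  In both cases the LVB bytes are copied while
 * the resource lock is held, before ldlm_grant_lock() publishes the
 * lock.
 */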
1713
1714 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1715                                     struct ldlm_namespace *ns,
1716                                     struct ldlm_request *dlm_req,
1717                                     struct ldlm_lock *lock)
1718 {
1719         int rc = -ENOSYS;
1720         ENTRY;
1721
1722         LDLM_DEBUG(lock, "client glimpse AST callback handler");
1723
1724         if (lock->l_glimpse_ast != NULL)
1725                 rc = lock->l_glimpse_ast(lock, req);
1726
1727         if (req->rq_repmsg != NULL) {
1728                 ptlrpc_reply(req);
1729         } else {
1730                 req->rq_status = rc;
1731                 ptlrpc_error(req);
1732         }
1733
1734         lock_res_and_lock(lock);
1735         if (lock->l_granted_mode == LCK_PW &&
1736             !lock->l_readers && !lock->l_writers &&
1737             cfs_time_after(cfs_time_current(),
1738                            cfs_time_add(lock->l_last_used,
1739                                         cfs_time_seconds(10)))) {
1740                 unlock_res_and_lock(lock);
1741                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1742                         ldlm_handle_bl_callback(ns, NULL, lock);
1743
1744                 EXIT;
1745                 return;
1746         }
1747         unlock_res_and_lock(lock);
1748         LDLM_LOCK_RELEASE(lock);
1749         EXIT;
1750 }
1751
1752 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1753 {
1754         if (req->rq_no_reply)
1755                 return 0;
1756
1757         req->rq_status = rc;
1758         if (!req->rq_packed_final) {
1759                 rc = lustre_pack_reply(req, 1, NULL, NULL);
1760                 if (rc)
1761                         return rc;
1762         }
1763         return ptlrpc_reply(req);
1764 }
1765
1766 #ifdef __KERNEL__
1767 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
1768 {
1769         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1770         ENTRY;
1771
1772         cfs_spin_lock(&blp->blp_lock);
1773         if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
1774                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
1775                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
1776         } else {
1777                 /* other blocking callbacks are added to the regular list */
1778                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1779         }
1780         cfs_spin_unlock(&blp->blp_lock);
1781
1782         cfs_waitq_signal(&blp->blp_waitq);
1783
1784         /* cannot use blwi->blwi_mode here, as blwi could already have
1785            been freed in LDLM_ASYNC mode */
1786         if (mode == LDLM_SYNC)
1787                 cfs_wait_for_completion(&blwi->blwi_comp);
1788
1789         RETURN(0);
1790 }
1791
1792 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
1793                              struct ldlm_namespace *ns,
1794                              struct ldlm_lock_desc *ld,
1795                              cfs_list_t *cancels, int count,
1796                              struct ldlm_lock *lock,
1797                              int mode)
1798 {
1799         cfs_init_completion(&blwi->blwi_comp);
1800         CFS_INIT_LIST_HEAD(&blwi->blwi_head);
1801
1802         if (cfs_memory_pressure_get())
1803                 blwi->blwi_mem_pressure = 1;
1804
1805         blwi->blwi_ns = ns;
1806         blwi->blwi_mode = mode;
1807         if (ld != NULL)
1808                 blwi->blwi_ld = *ld;
1809         if (count) {
1810                 cfs_list_add(&blwi->blwi_head, cancels);
1811                 cfs_list_del_init(cancels);
1812                 blwi->blwi_count = count;
1813         } else {
1814                 blwi->blwi_lock = lock;
1815         }
1816 }
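
/*
 * Note: when a cancel list is passed in, the cfs_list_add() +
 * cfs_list_del_init() pair above splices the whole list onto
 * blwi->blwi_head, leaving the caller's list head empty; the work
 * item owns the locks from that point on.
 */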
1817
1818 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1819                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1820                              cfs_list_t *cancels, int count, int mode)
1821 {
1822         ENTRY;
1823
1824         if (cancels && count == 0)
1825                 RETURN(0);
1826
1827         if (mode == LDLM_SYNC) {
1828                 /* for a synchronous call, do minimal memory allocation,
1829                  * as it could be triggered from the kernel shrinker
1830                  */
1831                 struct ldlm_bl_work_item blwi;
1832                 memset(&blwi, 0, sizeof(blwi));
1833                 init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
1834                 RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
1835         } else {
1836                 struct ldlm_bl_work_item *blwi;
1837                 OBD_ALLOC(blwi, sizeof(*blwi));
1838                 if (blwi == NULL)
1839                         RETURN(-ENOMEM);
1840                 init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
1841
1842                 RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
1843         }
1844 }
1845
1846 #endif
1847
1848 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1849                            struct ldlm_lock *lock)
1850 {
1851 #ifdef __KERNEL__
1852         RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
1853 #else
1854         RETURN(-ENOSYS);
1855 #endif
1856 }
1857
1858 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1859                            cfs_list_t *cancels, int count, int mode)
1860 {
1861 #ifdef __KERNEL__
1862         RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
1863 #else
1864         RETURN(-ENOSYS);
1865 #endif
1866 }
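
/*
 * A minimal usage sketch (hypothetical caller, with the list assumed
 * to be pre-filled; not code from this file):
 *
 *      CFS_LIST_HEAD(cancels);
 *      int count;
 *
 *      count = fill_cancel_list(ns, &cancels);    (assumed helper)
 *      ldlm_bl_to_thread_list(ns, NULL, &cancels, count, LDLM_SYNC);
 *
 * LDLM_SYNC keeps the work item on the caller's stack and blocks until
 * a blocking thread has processed it, which is why memory-pressure
 * paths such as the shrinker use it.
 */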
1867
1868 /* Setinfo coming from the server (e.g. MDT) to the client (e.g. MDC)! */
1869 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
1870 {
1871         struct obd_device *obd = req->rq_export->exp_obd;
1872         char *key;
1873         void *val;
1874         int keylen, vallen;
1875         int rc = -ENOSYS;
1876         ENTRY;
1877
1878         DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
1879
1880         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
1881
1882         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1883         if (key == NULL) {
1884                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
1885                 RETURN(-EFAULT);
1886         }
1887         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
1888                                       RCL_CLIENT);
1889         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1890         if (val == NULL) {
1891                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
1892                 RETURN(-EFAULT);
1893         }
1894         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
1895                                       RCL_CLIENT);
1896
1897         /* We are responsible for swabbing contents of val */
1898
1899         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
1900                 /* Pass it on to mdc (the "export" in this case) */
1901                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
1902                                         req->rq_export,
1903                                         sizeof(KEY_HSM_COPYTOOL_SEND),
1904                                         KEY_HSM_COPYTOOL_SEND,
1905                                         vallen, val, NULL);
1906         else
1907                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
1908
1909         return rc;
1910 }
1911
1912 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
1913                                         const char *msg, int rc,
1914                                         struct lustre_handle *handle)
1915 {
1916         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
1917                   "%s: [nid %s] [rc %d] [lock "LPX64"]",
1918                   msg, libcfs_id2str(req->rq_peer), rc,
1919                   handle ? handle->cookie : 0);
1920         if (req->rq_no_reply)
1921                 CWARN("No reply was sent, possibly due to bug 21636.\n");
1922         else if (rc)
1923                 CWARN("Sending the reply failed, possibly due to bug 21636.\n");
1924 }
1925
1926 /* TODO: handle requests similarly to the MDT: see mdt_handle_common() */
1927 static int ldlm_callback_handler(struct ptlrpc_request *req)
1928 {
1929         struct ldlm_namespace *ns;
1930         struct ldlm_request *dlm_req;
1931         struct ldlm_lock *lock;
1932         int rc;
1933         ENTRY;
1934
1935         /* Requests arrive in sender's byte order.  The ptlrpc service
1936          * handler has already checked and, if necessary, byte-swapped the
1937          * incoming request message body, but I am responsible for the
1938          * message buffers. */
1939
1940         /* do nothing for sec context finalize */
1941         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
1942                 RETURN(0);
1943
1944         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
1945
1946         if (req->rq_export == NULL) {
1947                 rc = ldlm_callback_reply(req, -ENOTCONN);
1948                 ldlm_callback_errmsg(req, "Operate on unconnected server",
1949                                      rc, NULL);
1950                 RETURN(0);
1951         }
1952
1953         LASSERT(req->rq_export != NULL);
1954         LASSERT(req->rq_export->exp_obd != NULL);
1955
1956         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1957         case LDLM_BL_CALLBACK:
1958                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
1959                         RETURN(0);
1960                 break;
1961         case LDLM_CP_CALLBACK:
1962                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
1963                         RETURN(0);
1964                 break;
1965         case LDLM_GL_CALLBACK:
1966                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
1967                         RETURN(0);
1968                 break;
1969         case LDLM_SET_INFO:
1970                 rc = ldlm_handle_setinfo(req);
1971                 ldlm_callback_reply(req, rc);
1972                 RETURN(0);
1973         case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1974                 CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
1975                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
1976                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
1977                         RETURN(0);
1978                 rc = llog_origin_handle_cancel(req);
1979                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
1980                         RETURN(0);
1981                 ldlm_callback_reply(req, rc);
1982                 RETURN(0);
1983         case OBD_QC_CALLBACK:
1984                 req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
1985                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
1986                         RETURN(0);
1987                 rc = target_handle_qc_callback(req);
1988                 ldlm_callback_reply(req, rc);
1989                 RETURN(0);
1990         case QUOTA_DQACQ:
1991         case QUOTA_DQREL:
1992                 /* reply in handler */
1993                 req_capsule_set(&req->rq_pill, &RQF_MDS_QUOTA_DQACQ);
1994                 rc = target_handle_dqacq_callback(req);
1995                 RETURN(0);
1996         case LLOG_ORIGIN_HANDLE_CREATE:
1997                 req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1998                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
1999                         RETURN(0);
2000                 rc = llog_origin_handle_open(req);
2001                 ldlm_callback_reply(req, rc);
2002                 RETURN(0);
2003         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2004                 req_capsule_set(&req->rq_pill,
2005                                 &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2006                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2007                         RETURN(0);
2008                 rc = llog_origin_handle_next_block(req);
2009                 ldlm_callback_reply(req, rc);
2010                 RETURN(0);
2011         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2012                 req_capsule_set(&req->rq_pill,
2013                                 &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2014                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2015                         RETURN(0);
2016                 rc = llog_origin_handle_read_header(req);
2017                 ldlm_callback_reply(req, rc);
2018                 RETURN(0);
2019         case LLOG_ORIGIN_HANDLE_CLOSE:
2020                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2021                         RETURN(0);
2022                 rc = llog_origin_handle_close(req);
2023                 ldlm_callback_reply(req, rc);
2024                 RETURN(0);
2025         default:
2026                 CERROR("unknown opcode %u\n",
2027                        lustre_msg_get_opc(req->rq_reqmsg));
2028                 ldlm_callback_reply(req, -EPROTO);
2029                 RETURN(0);
2030         }
2031
2032         ns = req->rq_export->exp_obd->obd_namespace;
2033         LASSERT(ns != NULL);
2034
2035         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2036
2037         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2038         if (dlm_req == NULL) {
2039                 rc = ldlm_callback_reply(req, -EPROTO);
2040                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2041                                      NULL);
2042                 RETURN(0);
2043         }
2044
2045         /* Force a known safe race, send a cancel to the server for a lock
2046          * which the server has already started a blocking callback on. */
2047         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2048             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2049                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
2050                 if (rc < 0)
2051                         CERROR("ldlm_cli_cancel: %d\n", rc);
2052         }
2053
2054         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2055         if (!lock) {
2056                 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
2057                        "disappeared\n", dlm_req->lock_handle[0].cookie);
2058                 rc = ldlm_callback_reply(req, -EINVAL);
2059                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2060                                      &dlm_req->lock_handle[0]);
2061                 RETURN(0);
2062         }
2063
2064         if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
2065             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2066                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2067
2068         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2069         lock_res_and_lock(lock);
2070         lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
2071         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2072                 /* If somebody cancels the lock while the cache is already
2073                  * dropped, or the lock failed before the CP AST reached
2074                  * the client, we can tell the server we have no lock;
2075                  * otherwise, send the cancel after dropping the cache. */
2076                 if (((lock->l_flags & LDLM_FL_CANCELING) &&
2077                     (lock->l_flags & LDLM_FL_BL_DONE)) ||
2078                     (lock->l_flags & LDLM_FL_FAILED)) {
2079                         LDLM_DEBUG(lock, "callback on lock "
2080                                    LPX64" - lock disappeared\n",
2081                                    dlm_req->lock_handle[0].cookie);
2082                         unlock_res_and_lock(lock);
2083                         LDLM_LOCK_RELEASE(lock);
2084                         rc = ldlm_callback_reply(req, -EINVAL);
2085                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2086                                              &dlm_req->lock_handle[0]);
2087                         RETURN(0);
2088                 }
2089                 /* BL_AST locks are not needed in the lru;
2090                  * let ldlm_cancel_lru() be fast. */
2091                 ldlm_lock_remove_from_lru(lock);
2092                 lock->l_flags |= LDLM_FL_BL_AST;
2093         }
2094         unlock_res_and_lock(lock);
2095
2096         /* We want the ost thread to get this reply so that it can respond
2097          * to ost requests (write cache writeback) that might be triggered
2098          * in the callback.
2099          *
2100          * But we'd also like to be able to indicate in the reply that we're
2101          * cancelling right now, because it's unused, or have an intent result
2102          * in the reply, so we might have to push the responsibility for sending
2103          * the reply down into the AST handlers, alas. */
2104
2105         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2106         case LDLM_BL_CALLBACK:
2107                 CDEBUG(D_INODE, "blocking ast\n");
2108                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2109                 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
2110                         rc = ldlm_callback_reply(req, 0);
2111                         if (req->rq_no_reply || rc)
2112                                 ldlm_callback_errmsg(req, "Normal process", rc,
2113                                                      &dlm_req->lock_handle[0]);
2114                 }
2115                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2116                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2117                 break;
2118         case LDLM_CP_CALLBACK:
2119                 CDEBUG(D_INODE, "completion ast\n");
2120                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2121                 ldlm_callback_reply(req, 0);
2122                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2123                 break;
2124         case LDLM_GL_CALLBACK:
2125                 CDEBUG(D_INODE, "glimpse ast\n");
2126                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2127                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2128                 break;
2129         default:
2130                 LBUG();                         /* checked above */
2131         }
2132
2133         RETURN(0);
2134 }
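
/*
 * The reply ordering above is deliberate: CP callbacks (and BL
 * callbacks without LDLM_FL_CANCEL_ON_BLOCK) acknowledge the RPC first
 * and do the real work afterwards, possibly in a blocking thread,
 * while GL callbacks reply from inside ldlm_handle_gl_callback()
 * because the reply itself carries the glimpsed LVB.
 */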
2135
2136 #ifdef HAVE_SERVER_SUPPORT
2137 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2138 {
2139         int rc;
2140         ENTRY;
2141
2142         /* Requests arrive in sender's byte order.  The ptlrpc service
2143          * handler has already checked and, if necessary, byte-swapped the
2144          * incoming request message body, but I am responsible for the
2145          * message buffers. */
2146
2147         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2148
2149         if (req->rq_export == NULL) {
2150                 struct ldlm_request *dlm_req;
2151
2152                 CERROR("%s from %s arrived at %lu with bad export cookie "
2153                        LPU64"\n",
2154                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2155                        libcfs_nid2str(req->rq_peer.nid),
2156                        req->rq_arrival_time.tv_sec,
2157                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2158
2159                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2160                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2161                         dlm_req = req_capsule_client_get(&req->rq_pill,
2162                                                          &RMF_DLM_REQ);
2163                         if (dlm_req != NULL)
2164                                 ldlm_lock_dump_handle(D_ERROR,
2165                                                       &dlm_req->lock_handle[0]);
2166                 }
2167                 ldlm_callback_reply(req, -ENOTCONN);
2168                 RETURN(0);
2169         }
2170
2171         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2172
2173         /* XXX FIXME move this back to mds/handler.c, bug 249 */
2174         case LDLM_CANCEL:
2175                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2176                 CDEBUG(D_INODE, "cancel\n");
2177                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
2178                         RETURN(0);
2179                 rc = ldlm_handle_cancel(req);
2180                 if (rc)
2181                         break;
2182                 RETURN(0);
2183         case OBD_LOG_CANCEL:
2184                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
2185                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
2186                         RETURN(0);
2187                 rc = llog_origin_handle_cancel(req);
2188                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
2189                         RETURN(0);
2190                 ldlm_callback_reply(req, rc);
2191                 RETURN(0);
2192         default:
2193                 CERROR("invalid opcode %d\n",
2194                        lustre_msg_get_opc(req->rq_reqmsg));
2195                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2196                 ldlm_callback_reply(req, -EINVAL);
2197         }
2198
2199         RETURN(0);
2200 }
2201
2202 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2203                                         struct ldlm_lock *lock)
2204 {
2205         struct ldlm_request *dlm_req;
2206         struct lustre_handle lockh;
2207         int rc = 0;
2208         int i;
2209         ENTRY;
2210
2211         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2212         if (dlm_req == NULL)
2213                 RETURN(0);
2214
2215         ldlm_lock2handle(lock, &lockh);
2216         for (i = 0; i < dlm_req->lock_count; i++) {
2217                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2218                                         &lockh)) {
2219                         DEBUG_REQ(D_RPCTRACE, req,
2220                                   "Prio raised by lock "LPX64".", lockh.cookie);
2221
2222                         rc = 1;
2223                         break;
2224                 }
2225         }
2226
2227         RETURN(rc);
2228
2229 }
2230
2231 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2232 {
2233         struct ldlm_request *dlm_req;
2234         int rc = 0;
2235         int i;
2236         ENTRY;
2237
2238         /* no prolong in recovery */
2239         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2240                 RETURN(0);
2241
2242         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2243         if (dlm_req == NULL)
2244                 RETURN(-EFAULT);
2245
2246         for (i = 0; i < dlm_req->lock_count; i++) {
2247                 struct ldlm_lock *lock;
2248
2249                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2250                 if (lock == NULL)
2251                         continue;
2252
2253                 rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
2254                 if (rc)
2255                         LDLM_DEBUG(lock, "hpreq cancel lock");
2256                 LDLM_LOCK_PUT(lock);
2257
2258                 if (rc)
2259                         break;
2260         }
2261
2262         RETURN(rc);
2263 }
2264
2265 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2266         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2267         .hpreq_check      = ldlm_cancel_hpreq_check,
2268         .hpreq_fini       = NULL,
2269 };
2270
2271 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2272 {
2273         ENTRY;
2274
2275         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2276
2277         if (req->rq_export == NULL)
2278                 RETURN(0);
2279
2280         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2281                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2282                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2283         }
2284         RETURN(0);
2285 }
2286
2287 int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
2288                         cfs_hlist_node_t *hnode, void *data)
2290 {
2291         cfs_list_t         *rpc_list = data;
2292         struct ldlm_lock   *lock = cfs_hash_object(hs, hnode);
2293
2294         lock_res_and_lock(lock);
2295
2296         if (lock->l_req_mode != lock->l_granted_mode) {
2297                 unlock_res_and_lock(lock);
2298                 return 0;
2299         }
2300
2301         LASSERT(lock->l_resource);
2302         if (lock->l_resource->lr_type != LDLM_IBITS &&
2303             lock->l_resource->lr_type != LDLM_PLAIN) {
2304                 unlock_res_and_lock(lock);
2305                 return 0;
2306         }
2307
2308         if (lock->l_flags & LDLM_FL_AST_SENT) {
2309                 unlock_res_and_lock(lock);
2310                 return 0;
2311         }
2312
2313         LASSERT(lock->l_blocking_ast);
2314         LASSERT(!lock->l_blocking_lock);
2315
2316         lock->l_flags |= LDLM_FL_AST_SENT;
2317         if (lock->l_export && lock->l_export->exp_lock_hash) {
2318                 /* NB: it's safe to call cfs_hash_del() even if the lock
2319                  * isn't in exp_lock_hash. */
2320                 /* In the function below, .hs_keycmp resolves to
2321                  * ldlm_export_lock_keycmp() */
2322                 /* coverity[overrun-buffer-val] */
2323                 cfs_hash_del(lock->l_export->exp_lock_hash,
2324                              &lock->l_remote_handle, &lock->l_exp_hash);
2325         }
2326
2327         cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
2328         LDLM_LOCK_GET(lock);
2329
2330         unlock_res_and_lock(lock);
2331         return 0;
2332 }
2333
2334 void ldlm_revoke_export_locks(struct obd_export *exp)
2335 {
2336         cfs_list_t  rpc_list;
2337         ENTRY;
2338
2339         CFS_INIT_LIST_HEAD(&rpc_list);
2340         cfs_hash_for_each_empty(exp->exp_lock_hash,
2341                                 ldlm_revoke_lock_cb, &rpc_list);
2342         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2343                           LDLM_WORK_REVOKE_AST);
2344
2345         EXIT;
2346 }
2347 #endif /* HAVE_SERVER_SUPPORT */
2348
2349 #ifdef __KERNEL__
2350 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
2351 {
2352         struct ldlm_bl_work_item *blwi = NULL;
2353         static unsigned int num_bl = 0;
2354
2355         cfs_spin_lock(&blp->blp_lock);
2356         /* process a request from blp_list at least once per blp_num_threads dequeues */
2357         if (!cfs_list_empty(&blp->blp_list) &&
2358             (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
2359                 blwi = cfs_list_entry(blp->blp_list.next,
2360                                       struct ldlm_bl_work_item, blwi_entry);
2361         else if (!cfs_list_empty(&blp->blp_prio_list))
2362                 blwi = cfs_list_entry(blp->blp_prio_list.next,
2363                                       struct ldlm_bl_work_item, blwi_entry);
2366
2367         if (blwi) {
2368                 if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
2369                         num_bl = 0;
2370                 cfs_list_del(&blwi->blwi_entry);
2371         }
2372         cfs_spin_unlock(&blp->blp_lock);
2373
2374         return blwi;
2375 }
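
/*
 * The static num_bl counter implements a simple anti-starvation rule:
 * priority work (LDLM_FL_DISCARD_DATA callbacks) is normally served
 * first, but at least one item is taken from the regular blp_list for
 * every blp_num_threads dequeues.
 */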
2376
2377 /* This only contains temporary data until the thread starts */
2378 struct ldlm_bl_thread_data {
2379         char                    bltd_name[CFS_CURPROC_COMM_MAX];
2380         struct ldlm_bl_pool     *bltd_blp;
2381         cfs_completion_t        bltd_comp;
2382         int                     bltd_num;
2383 };
2384
2385 static int ldlm_bl_thread_main(void *arg);
2386
2387 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
2388 {
2389         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2390         int rc;
2391
2392         cfs_init_completion(&bltd.bltd_comp);
2393         rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
2394         if (rc < 0) {
2395                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
2396                        cfs_atomic_read(&blp->blp_num_threads), rc);
2397                 return rc;
2398         }
2399         cfs_wait_for_completion(&bltd.bltd_comp);
2400
2401         return 0;
2402 }
2403
2404 static int ldlm_bl_thread_main(void *arg)
2405 {
2406         struct ldlm_bl_pool *blp;
2407         ENTRY;
2408
2409         {
2410                 struct ldlm_bl_thread_data *bltd = arg;
2411
2412                 blp = bltd->bltd_blp;
2413
2414                 bltd->bltd_num =
2415                         cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
2416                 cfs_atomic_inc(&blp->blp_busy_threads);
2417
2418                 snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
2419                         "ldlm_bl_%02d", bltd->bltd_num);
2420                 cfs_daemonize(bltd->bltd_name);
2421
2422                 cfs_complete(&bltd->bltd_comp);
2423                 /* cannot use bltd after this, it is only on caller's stack */
2424         }
2425
2426         while (1) {
2427                 struct l_wait_info lwi = { 0 };
2428                 struct ldlm_bl_work_item *blwi = NULL;
2429                 int busy;
2430
2431                 blwi = ldlm_bl_get_work(blp);
2432
2433                 if (blwi == NULL) {
2434                         cfs_atomic_dec(&blp->blp_busy_threads);
2435                         l_wait_event_exclusive(blp->blp_waitq,
2436                                          (blwi = ldlm_bl_get_work(blp)) != NULL,
2437                                          &lwi);
2438                         busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
2439                 } else {
2440                         busy = cfs_atomic_read(&blp->blp_busy_threads);
2441                 }
2442
2443                 if (blwi->blwi_ns == NULL)
2444                         /* added by ldlm_cleanup() */
2445                         break;
2446
2447                 /* Not fatal if racy and we end up with a few too many threads */
2448                 if (unlikely(busy < blp->blp_max_threads &&
2449                              busy >= cfs_atomic_read(&blp->blp_num_threads) &&
2450                              !blwi->blwi_mem_pressure))
2451                         /* discard the return value, we tried */
2452                         ldlm_bl_thread_start(blp);
2453
2454                 if (blwi->blwi_mem_pressure)
2455                         cfs_memory_pressure_set();
2456
2457                 if (blwi->blwi_count) {
2458                         int count;
2459                         /* In the special case where we cancel lru locks
2460                          * asynchronously, the list of locks is passed in
2461                          * here.  The locks are marked LDLM_FL_CANCELING,
2462                          * but NOT cancelled locally yet. */
2463                         count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2464                                                            blwi->blwi_count,
2465                                                            LCF_BL_AST);
2466                         ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
2467                 } else {
2468                         ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2469                                                 blwi->blwi_lock);
2470                 }
2471                 if (blwi->blwi_mem_pressure)
2472                         cfs_memory_pressure_clr();
2473
2474                 if (blwi->blwi_mode == LDLM_ASYNC)
2475                         OBD_FREE(blwi, sizeof(*blwi));
2476                 else
2477                         cfs_complete(&blwi->blwi_comp);
2478         }
2479
2480         cfs_atomic_dec(&blp->blp_busy_threads);
2481         cfs_atomic_dec(&blp->blp_num_threads);
2482         cfs_complete(&blp->blp_comp);
2483         RETURN(0);
2484 }
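
/*
 * Thread exit is driven through the blwi_ns == NULL check above:
 * ldlm_cleanup() queues dummy work items with a NULL namespace to
 * break the loop, and each exiting thread signals blp_comp so the
 * teardown can wait for it.
 */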
2485
2486 #endif
2487
2488 static int ldlm_setup(void);
2489 static int ldlm_cleanup(void);
2490
2491 int ldlm_get_ref(void)
2492 {
2493         int rc = 0;
2494         ENTRY;
2495         cfs_mutex_lock(&ldlm_ref_mutex);
2496         if (++ldlm_refcount == 1) {
2497                 rc = ldlm_setup();
2498                 if (rc)
2499                         ldlm_refcount--;
2500         }
2501         cfs_mutex_unlock(&ldlm_ref_mutex);
2502
2503         RETURN(rc);
2504 }
2505
2506 void ldlm_put_ref(void)
2507 {
2508         ENTRY;
2509         cfs_mutex_lock(&ldlm_ref_mutex);
2510         if (ldlm_refcount == 1) {
2511                 int rc = ldlm_cleanup();
2512                 if (rc)
2513                         CERROR("ldlm_cleanup failed: %d\n", rc);
2514                 else
2515                         ldlm_refcount--;
2516         } else {
2517                 ldlm_refcount--;
2518         }
2519         cfs_mutex_unlock(&ldlm_ref_mutex);
2520
2521         EXIT;
2522 }
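
/*
 * ldlm_get_ref()/ldlm_put_ref() give the LDLM classic first-user /
 * last-user semantics: the first reference runs ldlm_setup() and the
 * last one runs ldlm_cleanup().  Note that a failed cleanup
 * deliberately keeps the last reference, so a later ldlm_put_ref()
 * can retry the teardown.
 */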
2523
2524 /*
2525  * Export handle<->lock hash operations.
2526  */
2527 static unsigned
2528 ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
2529 {
2530         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2531 }
2532
2533 static void *
2534 ldlm_export_lock_key(cfs_hlist_node_t *hnode)
2535 {
2536         struct ldlm_lock *lock;
2537
2538         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2539         return &lock->l_remote_handle;
2540 }
2541
2542 static void
2543 ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
2544 {
2545         struct ldlm_lock     *lock;
2546
2547         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2548         lock->l_remote_handle = *(struct lustre_handle *)key;
2549 }
2550
2551 static int
2552 ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
2553 {
2554         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2555 }
2556
2557 static void *
2558 ldlm_export_lock_object(cfs_hlist_node_t *hnode)
2559 {
2560         return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2561 }
2562
2563 static void
2564 ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2565 {
2566         struct ldlm_lock *lock;
2567
2568         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2569         LDLM_LOCK_GET(lock);
2570 }
2571
2572 static void
2573 ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2574 {
2575         struct ldlm_lock *lock;
2576
2577         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2578         LDLM_LOCK_RELEASE(lock);
2579 }
2580
2581 static cfs_hash_ops_t ldlm_export_lock_ops = {
2582         .hs_hash        = ldlm_export_lock_hash,
2583         .hs_key         = ldlm_export_lock_key,
2584         .hs_keycmp      = ldlm_export_lock_keycmp,
2585         .hs_keycpy      = ldlm_export_lock_keycpy,
2586         .hs_object      = ldlm_export_lock_object,
2587         .hs_get         = ldlm_export_lock_get,
2588         .hs_put         = ldlm_export_lock_put,
2589         .hs_put_locked  = ldlm_export_lock_put,
2590 };
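
/*
 * These ops plug per-export locks into a generic cfs_hash keyed by the
 * remote (client-side) lock handle; hs_get/hs_put map to
 * LDLM_LOCK_GET()/LDLM_LOCK_RELEASE(), so hash membership holds a
 * reference on the lock.
 */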
2591
2592 int ldlm_init_export(struct obd_export *exp)
2593 {
2594         ENTRY;
2595
2596         exp->exp_lock_hash =
2597                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
2598                                 HASH_EXP_LOCK_CUR_BITS,
2599                                 HASH_EXP_LOCK_MAX_BITS,
2600                                 HASH_EXP_LOCK_BKT_BITS, 0,
2601                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
2602                                 &ldlm_export_lock_ops,
2603                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
2604                                 CFS_HASH_NBLK_CHANGE);
2605
2606         if (!exp->exp_lock_hash)
2607                 RETURN(-ENOMEM);
2608
2609         RETURN(0);
2610 }
2611 EXPORT_SYMBOL(ldlm_init_export);
2612
2613 void ldlm_destroy_export(struct obd_export *exp)
2614 {
2615         ENTRY;
2616         cfs_hash_putref(exp->exp_lock_hash);
2617         exp->exp_lock_hash = NULL;
2618
2619         ldlm_destroy_flock_export(exp);
2620         EXIT;
2621 }
2622 EXPORT_SYMBOL(ldlm_destroy_export);
2623
2624 static int ldlm_setup(void)
2625 {
2626         static struct ptlrpc_service_conf       conf;
2627         struct ldlm_bl_pool                     *blp = NULL;
2628         int rc = 0;
2629 #ifdef __KERNEL__
2630         int i;
2631 #endif
2632         ENTRY;
2633
2634         if (ldlm_state != NULL)
2635                 RETURN(-EALREADY);
2636
2637         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
2638         if (ldlm_state == NULL)
2639                 RETURN(-ENOMEM);
2640
2641 #ifdef LPROCFS
2642         rc = ldlm_proc_setup();
2643         if (rc != 0)
2644                 GOTO(out, rc);
2645 #endif
2646
2647         memset(&conf, 0, sizeof(conf));
2648         conf = (typeof(conf)) {
2649                 .psc_name               = "ldlm_cbd",
2650                 .psc_watchdog_factor    = 2,
2651                 .psc_buf                = {
2652                         .bc_nbufs               = LDLM_NBUFS,
2653                         .bc_buf_size            = LDLM_BUFSIZE,
2654                         .bc_req_max_size        = LDLM_MAXREQSIZE,
2655                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
2656                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
2657                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
2658                 },
2659                 .psc_thr                = {
2660                         .tc_thr_name            = "ldlm_cb",
2661                         .tc_thr_factor          = LDLM_THR_FACTOR,
2662                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
2663                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
2664                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
2665                         .tc_nthrs_user          = ldlm_num_threads,
2666                         .tc_cpu_affinity        = 1,
2667                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
2668                 },
2669                 .psc_cpt                = {
2670                         .cc_pattern             = ldlm_cpts,
2671                 },
2672                 .psc_ops                = {
2673                         .so_req_handler         = ldlm_callback_handler,
2674                 },
2675         };
2676         ldlm_state->ldlm_cb_service = \
2677                         ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
2678         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
2679                 CERROR("failed to start service\n");
2680                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
2681                 ldlm_state->ldlm_cb_service = NULL;
2682                 GOTO(out, rc);
2683         }
2684
2685 #ifdef HAVE_SERVER_SUPPORT
        memset(&conf, 0, sizeof(conf));
        conf = (typeof(conf)) {
                .psc_name               = "ldlm_canceld",
                .psc_watchdog_factor    = 6,
                .psc_buf                = {
                        .bc_nbufs               = LDLM_NBUFS,
                        .bc_buf_size            = LDLM_BUFSIZE,
                        .bc_req_max_size        = LDLM_MAXREQSIZE,
                        .bc_rep_max_size        = LDLM_MAXREPSIZE,
                        .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
                        .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
                },
                .psc_thr                = {
                        .tc_thr_name            = "ldlm_cn",
                        .tc_thr_factor          = LDLM_THR_FACTOR,
                        .tc_nthrs_init          = LDLM_NTHRS_INIT,
                        .tc_nthrs_base          = LDLM_NTHRS_BASE,
                        .tc_nthrs_max           = LDLM_NTHRS_MAX,
                        .tc_nthrs_user          = ldlm_num_threads,
                        .tc_cpu_affinity        = 1,
                        .tc_ctx_tags            = LCT_MD_THREAD |
                                                  LCT_DT_THREAD |
                                                  LCT_CL_THREAD,
                },
                .psc_cpt                = {
                        .cc_pattern             = ldlm_cpts,
                },
                .psc_ops                = {
                        .so_req_handler         = ldlm_cancel_handler,
                        .so_hpreq_handler       = ldlm_hpreq_handler,
                },
        };
        ldlm_state->ldlm_cancel_service =
                        ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
        if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
                rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
                CERROR("failed to start ldlm_canceld service: %d\n", rc);
                ldlm_state->ldlm_cancel_service = NULL;
                GOTO(out, rc);
        }
#endif

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        cfs_spin_lock_init(&blp->blp_lock);
        CFS_INIT_LIST_HEAD(&blp->blp_list);
        CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
        cfs_waitq_init(&blp->blp_waitq);
        cfs_atomic_set(&blp->blp_num_threads, 0);
        cfs_atomic_set(&blp->blp_busy_threads, 0);

#ifdef __KERNEL__
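        /* With ldlm_num_threads unset, let the blocking-callback pool
         * grow on demand between LDLM_NTHRS_INIT and LDLM_NTHRS_MAX;
         * a user-supplied count is pinned, clamped to the same range. */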
        if (ldlm_num_threads == 0) {
                blp->blp_min_threads = LDLM_NTHRS_INIT;
                blp->blp_max_threads = LDLM_NTHRS_MAX;
        } else {
                blp->blp_min_threads = blp->blp_max_threads =
                        min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
                                                         ldlm_num_threads));
        }

        for (i = 0; i < blp->blp_min_threads; i++) {
                rc = ldlm_bl_thread_start(blp);
                if (rc < 0)
                        GOTO(out, rc);
        }

# ifdef HAVE_SERVER_SUPPORT
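        /* Server side: initialize the waiting-locks machinery and start
         * the expired-lock thread, which evicts clients whose blocking
         * callbacks time out. */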
        CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_init(&expired_lock_thread.elt_waitq);

        CFS_INIT_LIST_HEAD(&waiting_locks_list);
        cfs_spin_lock_init(&waiting_locks_spinlock);
        cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);

        rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out, rc);
        }

        cfs_wait_event(expired_lock_thread.elt_waitq,
                       expired_lock_thread.elt_state == ELT_READY);
# endif /* HAVE_SERVER_SUPPORT */

        rc = ldlm_pools_init();
        if (rc) {
                CERROR("Failed to initialize LDLM pools: %d\n", rc);
                GOTO(out, rc);
        }
#endif
        RETURN(0);

 out:
        ldlm_cleanup();
        RETURN(rc);
}

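/*
 * Undo ldlm_setup(): stop the blocking-callback threads, unregister the
 * PTLRPC services and free ldlm_state.  Returns -EBUSY while any
 * namespace still exists; also tolerates a partially constructed
 * ldlm_state, since ldlm_setup() calls it on its error path.
 */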
static int ldlm_cleanup(void)
{
        ENTRY;

        if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
            !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        ldlm_pools_fini();

        if (ldlm_state->ldlm_bl_pool != NULL) {
                struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

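                /* Queue one dummy work item per running thread: a NULL
                 * blwi_ns tells the blocking-callback thread to exit,
                 * and it signals blp_comp on its way out. */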
                while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
                        struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                        cfs_init_completion(&blp->blp_comp);

                        cfs_spin_lock(&blp->blp_lock);
                        cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                        cfs_waitq_signal(&blp->blp_waitq);
                        cfs_spin_unlock(&blp->blp_lock);

                        cfs_wait_for_completion(&blp->blp_comp);
                }

                OBD_FREE(blp, sizeof(*blp));
        }
#endif /* __KERNEL__ */

        if (ldlm_state->ldlm_cb_service != NULL)
                ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#ifdef HAVE_SERVER_SUPPORT
        if (ldlm_state->ldlm_cancel_service != NULL)
                ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif

#ifdef __KERNEL__
        ldlm_proc_cleanup();

# ifdef HAVE_SERVER_SUPPORT
        if (expired_lock_thread.elt_state != ELT_STOPPED) {
                expired_lock_thread.elt_state = ELT_TERMINATE;
                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
                cfs_wait_event(expired_lock_thread.elt_waitq,
                               expired_lock_thread.elt_state == ELT_STOPPED);
        }
# endif
#endif /* __KERNEL__ */

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}

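/*
 * One-time module initialization: set up the global mutexes and the
 * slab caches for resources, locks and interval nodes.  Paired with
 * ldlm_exit() at module unload.
 */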
int ldlm_init(void)
{
        cfs_mutex_init(&ldlm_ref_mutex);
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
        ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

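        /* Locks are freed through RCU (see the comment in ldlm_exit()),
         * hence SLAB_DESTROY_BY_RCU for the lock slab. */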
        ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
                              sizeof(struct ldlm_lock), 0,
                              CFS_SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
        if (ldlm_lock_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        ldlm_interval_slab = cfs_mem_cache_create("interval_node",
                                        sizeof(struct ldlm_interval),
                                        0, CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_interval_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                cfs_mem_cache_destroy(ldlm_lock_slab);
                return -ENOMEM;
        }
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        class_export_dump_hook = ldlm_dump_export_locks;
#endif
        return 0;
}

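/*
 * Module unload: warn if references are still held, then drain RCU and
 * free the slab caches created in ldlm_init().
 */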
void ldlm_exit(void)
{
        int rc;

        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        rc = cfs_mem_cache_destroy(ldlm_resource_slab);
        LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
#ifdef __KERNEL__
        /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so wait
         * for a grace period to elapse with synchronize_rcu() before
         * destroying the slab, giving any pending ldlm_lock_free()
         * callbacks a chance to run. */
        synchronize_rcu();
#endif
        rc = cfs_mem_cache_destroy(ldlm_lock_slab);
        LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
        rc = cfs_mem_cache_destroy(ldlm_interval_slab);
        LASSERTF(rc == 0, "couldn't free interval node slab\n");
}

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(ldlm_get_processing_policy);
#endif
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_addref_try);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
EXPORT_SYMBOL(ldlm_lock_allow_match);
EXPORT_SYMBOL(ldlm_lock_downgrade);
EXPORT_SYMBOL(ldlm_lock_convert);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast_async);
EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_blocking_ast);
EXPORT_SYMBOL(ldlm_glimpse_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
EXPORT_SYMBOL(ldlm_prep_elc_req);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
EXPORT_SYMBOL(ldlm_cli_enqueue_local);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
EXPORT_SYMBOL(ldlm_cli_cancel_req);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_resource_iterate);
EXPORT_SYMBOL(ldlm_cancel_resource_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list);

/* ldlm_lockd.c */
#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_glimpse_locks);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_enqueue0);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_request_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_handle_convert0);
EXPORT_SYMBOL(ldlm_revoke_export_locks);
#endif
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);
EXPORT_SYMBOL(ldlm_refresh_waiting_lock);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_import_add_conn);
EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_pack_pool_reply);

#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(server_disconnect_export);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
#endif

/* l_lock.c */
EXPORT_SYMBOL(lock_res_and_lock);
EXPORT_SYMBOL(unlock_res_and_lock);