1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_lockd.c
37  *
38  * Author: Peter Braam <braam@clusterfs.com>
39  * Author: Phil Schwan <phil@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43
44 #ifdef __KERNEL__
45 # include <libcfs/libcfs.h>
46 #else
47 # include <liblustre.h>
48 #endif
49
50 #include <lustre_dlm.h>
51 #include <obd_class.h>
52 #include <libcfs/list.h>
53 #include "ldlm_internal.h"
54
55 static int ldlm_num_threads;
56 CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
57                 "number of DLM service threads to start");
58
59 static char *ldlm_cpts;
60 CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
61                 "CPU partitions ldlm threads should run on");
62
63 extern cfs_mem_cache_t *ldlm_resource_slab;
64 extern cfs_mem_cache_t *ldlm_lock_slab;
65 static cfs_mutex_t      ldlm_ref_mutex;
66 static int ldlm_refcount;
67
68 struct ldlm_cb_async_args {
69         struct ldlm_cb_set_arg *ca_set_arg;
70         struct ldlm_lock       *ca_lock;
71 };
72
73 /* LDLM state */
74
75 static struct ldlm_state *ldlm_state;
76
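/* Round a lock callback deadline up to the next whole second; used when
 * arming waiting_locks_timer so it fires at most once per second. */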
77 inline cfs_time_t round_timeout(cfs_time_t timeout)
78 {
79         return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
80 }
81
82 /* timeout for initial callback (AST) reply (bz10399) */
83 static inline unsigned int ldlm_get_rq_timeout(void)
84 {
85         /* Non-AT value */
86         unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
87
88         return timeout < 1 ? 1 : timeout;
89 }
90
91 #define ELT_STOPPED   0
92 #define ELT_READY     1
93 #define ELT_TERMINATE 2
94
95 struct ldlm_bl_pool {
96         cfs_spinlock_t          blp_lock;
97
98         /*
99          * blp_prio_list is used for callbacks that should be handled
100          * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
101          * see bug 13843
102          */
103         cfs_list_t              blp_prio_list;
104
105         /*
106          * blp_list is used for all other callbacks which are likely
107          * to take longer to process.
108          */
109         cfs_list_t              blp_list;
110
111         cfs_waitq_t             blp_waitq;
112         cfs_completion_t        blp_comp;
113         cfs_atomic_t            blp_num_threads;
114         cfs_atomic_t            blp_busy_threads;
115         int                     blp_min_threads;
116         int                     blp_max_threads;
117 };
118
119 struct ldlm_bl_work_item {
120         cfs_list_t              blwi_entry;
121         struct ldlm_namespace  *blwi_ns;
122         struct ldlm_lock_desc   blwi_ld;
123         struct ldlm_lock       *blwi_lock;
124         cfs_list_t              blwi_head;
125         int                     blwi_count;
126         cfs_completion_t        blwi_comp;
127         int                     blwi_mode;
128         int                     blwi_mem_pressure;
129 };
130
131 #if defined(HAVE_SERVER_SUPPORT) && defined(__KERNEL__)
132
133 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
134 static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
135 static cfs_list_t waiting_locks_list;
136 static cfs_timer_t waiting_locks_timer;
137
138 static struct expired_lock_thread {
139         cfs_waitq_t             elt_waitq;
140         int                     elt_state;
141         int                     elt_dump;
142         cfs_list_t              elt_expired_locks;
143 } expired_lock_thread;
144
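/* Check, under the waiting-locks BH spinlock, whether any locks are queued
 * for the expired lock thread to process. */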
145 static inline int have_expired_locks(void)
146 {
147         int need_to_run;
148
149         ENTRY;
150         cfs_spin_lock_bh(&waiting_locks_spinlock);
151         need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
152         cfs_spin_unlock_bh(&waiting_locks_spinlock);
153
154         RETURN(need_to_run);
155 }
156
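/* Main loop of the "ldlm_elt" thread: evicts (fails) the export of every
 * lock whose callback timed out, dumps the debug log when requested, and
 * exits once elt_state is set to ELT_TERMINATE. */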
157 static int expired_lock_main(void *arg)
158 {
159         cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
160         struct l_wait_info lwi = { 0 };
161         int do_dump;
162
163         ENTRY;
164         cfs_daemonize("ldlm_elt");
165
166         expired_lock_thread.elt_state = ELT_READY;
167         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
168
169         while (1) {
170                 l_wait_event(expired_lock_thread.elt_waitq,
171                              have_expired_locks() ||
172                              expired_lock_thread.elt_state == ELT_TERMINATE,
173                              &lwi);
174
175                 cfs_spin_lock_bh(&waiting_locks_spinlock);
176                 if (expired_lock_thread.elt_dump) {
177                         struct libcfs_debug_msg_data msgdata = {
178                                 .msg_file = __FILE__,
179                                 .msg_fn = "waiting_locks_callback",
180                                 .msg_line = expired_lock_thread.elt_dump };
181                         cfs_spin_unlock_bh(&waiting_locks_spinlock);
182
183                         /* from waiting_locks_callback, but not in timer */
184                         libcfs_debug_dumplog();
185                         libcfs_run_lbug_upcall(&msgdata);
186
187                         cfs_spin_lock_bh(&waiting_locks_spinlock);
188                         expired_lock_thread.elt_dump = 0;
189                 }
190
191                 do_dump = 0;
192
193                 while (!cfs_list_empty(expired)) {
194                         struct obd_export *export;
195                         struct ldlm_lock *lock;
196
197                         lock = cfs_list_entry(expired->next, struct ldlm_lock,
198                                           l_pending_chain);
199                         if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
200                             (void *)lock >= LP_POISON) {
201                                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
202                                 CERROR("free lock on elt list %p\n", lock);
203                                 LBUG();
204                         }
205                         cfs_list_del_init(&lock->l_pending_chain);
206                         if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
207                             (void *)lock->l_export >= LP_POISON) {
208                                 CERROR("lock with free export on elt list %p\n",
209                                        lock->l_export);
210                                 lock->l_export = NULL;
211                                 LDLM_ERROR(lock, "free export");
212                                 /* release extra ref grabbed by
213                                  * ldlm_add_waiting_lock() or
214                                  * ldlm_failed_ast() */
215                                 LDLM_LOCK_RELEASE(lock);
216                                 continue;
217                         }
218
219                         if (lock->l_destroyed) {
220                                 /* release the lock refcount that
221                                  * waiting_locks_callback() found */
222                                 LDLM_LOCK_RELEASE(lock);
223                                 continue;
224                         }
225                         export = class_export_lock_get(lock->l_export, lock);
226                         cfs_spin_unlock_bh(&waiting_locks_spinlock);
227
228                         do_dump++;
229                         class_fail_export(export);
230                         class_export_lock_put(export, lock);
231
232                         /* release extra ref grabbed by ldlm_add_waiting_lock()
233                          * or ldlm_failed_ast() */
234                         LDLM_LOCK_RELEASE(lock);
235
236                         cfs_spin_lock_bh(&waiting_locks_spinlock);
237                 }
238                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
239
240                 if (do_dump && obd_dump_on_eviction) {
241                         CERROR("dump the log upon eviction\n");
242                         libcfs_debug_dumplog();
243                 }
244
245                 if (expired_lock_thread.elt_state == ELT_TERMINATE)
246                         break;
247         }
248
249         expired_lock_thread.elt_state = ELT_STOPPED;
250         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
251         RETURN(0);
252 }
253
254 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
255 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds);
256
257 /**
258  * Check if there is a request in the export request list
259  * which prevents the lock canceling.
260  */
261 static int ldlm_lock_busy(struct ldlm_lock *lock)
262 {
263         struct ptlrpc_request *req;
264         int match = 0;
265         ENTRY;
266
267         if (lock->l_export == NULL)
268                 return 0;
269
270         cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
271         cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
272                                 rq_exp_list) {
273                 if (req->rq_ops->hpreq_lock_match) {
274                         match = req->rq_ops->hpreq_lock_match(req, lock);
275                         if (match)
276                                 break;
277                 }
278         }
279         cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
280         RETURN(match);
281 }
282
283 /* This is called from within a timer interrupt and cannot schedule */
284 static void waiting_locks_callback(unsigned long unused)
285 {
286         struct ldlm_lock        *lock;
287         int                     need_dump = 0;
288
289         cfs_spin_lock_bh(&waiting_locks_spinlock);
290         while (!cfs_list_empty(&waiting_locks_list)) {
291                 lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
292                                       l_pending_chain);
293                 if (cfs_time_after(lock->l_callback_timeout,
294                                    cfs_time_current()) ||
295                     (lock->l_req_mode == LCK_GROUP))
296                         break;
297
298                 if (ptlrpc_check_suspend()) {
299                         /* There is a case when we talk to one MDS while
300                          * holding a lock from another MDS. We can easily get
301                          * here if that second MDS is being recovered, so we
302                          * suspend the timeouts. Bug 6019 */
303
304                         LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
305                                    lock->l_export->exp_client_uuid.uuid,
306                                    lock->l_export->exp_connection->c_remote_uuid.uuid,
307                                    libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
308
309                         cfs_list_del_init(&lock->l_pending_chain);
310                         if (lock->l_destroyed) {
311                                 /* relay the lock refcount decrease to
312                                  * expired lock thread */
313                                 cfs_list_add(&lock->l_pending_chain,
314                                         &expired_lock_thread.elt_expired_locks);
315                         } else {
316                                 __ldlm_add_waiting_lock(lock,
317                                                 ldlm_get_enq_timeout(lock));
318                         }
319                         continue;
320                 }
321
322                 /* if the timeout overlaps the activation time of suspended timeouts,
323                  * extend it to give the client a chance to reconnect */
324                 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
325                                                  cfs_time_seconds(obd_timeout)/2),
326                                     ptlrpc_suspend_wakeup_time())) {
327                         LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
328                                    lock->l_export->exp_client_uuid.uuid,
329                                    lock->l_export->exp_connection->c_remote_uuid.uuid,
330                                    libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
331
332                         cfs_list_del_init(&lock->l_pending_chain);
333                         if (lock->l_destroyed) {
334                                 /* relay the lock refcount decrease to
335                                  * expired lock thread */
336                                 cfs_list_add(&lock->l_pending_chain,
337                                         &expired_lock_thread.elt_expired_locks);
338                         } else {
339                                 __ldlm_add_waiting_lock(lock,
340                                                 ldlm_get_enq_timeout(lock));
341                         }
342                         continue;
343                 }
344
345                 /* Check if we need to prolong timeout */
346                 if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
347                     ldlm_lock_busy(lock)) {
348                         int cont = 1;
349
350                         if (lock->l_pending_chain.next == &waiting_locks_list)
351                                 cont = 0;
352
353                         LDLM_LOCK_GET(lock);
354
355                         cfs_spin_unlock_bh(&waiting_locks_spinlock);
356                         LDLM_DEBUG(lock, "prolong the busy lock");
357                         ldlm_refresh_waiting_lock(lock,
358                                                   ldlm_get_enq_timeout(lock));
359                         cfs_spin_lock_bh(&waiting_locks_spinlock);
360
361                         if (!cont) {
362                                 LDLM_LOCK_RELEASE(lock);
363                                 break;
364                         }
365
366                         LDLM_LOCK_RELEASE(lock);
367                         continue;
368                 }
369                 ldlm_lock_to_ns(lock)->ns_timeouts++;
370                 LDLM_ERROR(lock, "lock callback timer expired after %lds: "
371                            "evicting client at %s ",
372                            cfs_time_current_sec()- lock->l_last_activity,
373                            libcfs_nid2str(
374                                    lock->l_export->exp_connection->c_peer.nid));
375
376                 /* no need to take an extra ref on the lock since it was on
377                  * the waiting_locks_list and ldlm_add_waiting_lock()
378                  * already grabbed a ref */
379                 cfs_list_del(&lock->l_pending_chain);
380                 cfs_list_add(&lock->l_pending_chain,
381                              &expired_lock_thread.elt_expired_locks);
382                 need_dump = 1;
383         }
384
385         if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
386                 if (obd_dump_on_timeout && need_dump)
387                         expired_lock_thread.elt_dump = __LINE__;
388
389                 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
390         }
391
392         /*
393          * Make sure the timer will fire again if we have any locks
394          * left.
395          */
396         if (!cfs_list_empty(&waiting_locks_list)) {
397                 cfs_time_t timeout_rounded;
398                 lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
399                                       l_pending_chain);
400                 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
401                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
402         }
403         cfs_spin_unlock_bh(&waiting_locks_spinlock);
404 }
405
406 /*
407  * Indicate that we're waiting for a client to call us back cancelling a given
408  * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
409  * timer to fire appropriately.  (We round up to the next second, to avoid
410  * floods of timer firings during periods of high lock contention and traffic).
411  * As done by ldlm_add_waiting_lock(), the caller must grab a lock reference
412  * if it has been added to the waiting list (1 is returned).
413  *
414  * Called with the namespace lock held.
415  */
416 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
417 {
418         cfs_time_t timeout;
419         cfs_time_t timeout_rounded;
420
421         if (!cfs_list_empty(&lock->l_pending_chain))
422                 return 0;
423
424         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
425             OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
426                 seconds = 1;
427
428         timeout = cfs_time_shift(seconds);
429         if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
430                 lock->l_callback_timeout = timeout;
431
432         timeout_rounded = round_timeout(lock->l_callback_timeout);
433
434         if (cfs_time_before(timeout_rounded,
435                             cfs_timer_deadline(&waiting_locks_timer)) ||
436             !cfs_timer_is_armed(&waiting_locks_timer)) {
437                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
438         }
439         /* if the new lock has a shorter timeout than something earlier on
440            the list, we'll wait the longer amount of time; no big deal. */
441         /* FIFO */
442         cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
443         return 1;
444 }
445
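/* Start the callback timer for a server-side lock: add it to the waiting
 * list (taking an extra lock reference) and link it onto the export's
 * blocking-lock list. */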
446 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
447 {
448         int ret;
449         int timeout = ldlm_get_enq_timeout(lock);
450
451         /* NB: must be called with lock_res_and_lock() held */
452         LASSERT(lock->l_res_locked);
453         lock->l_waited = 1;
454
455         LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
456
457         cfs_spin_lock_bh(&waiting_locks_spinlock);
458         if (lock->l_destroyed) {
459                 static cfs_time_t next;
460                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
461                 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
462                 if (cfs_time_after(cfs_time_current(), next)) {
463                         next = cfs_time_shift(14400);
464                         libcfs_debug_dumpstack(NULL);
465                 }
466                 return 0;
467         }
468
469         ret = __ldlm_add_waiting_lock(lock, timeout);
470         if (ret) {
471                 /* grab ref on the lock if it has been added to the
472                  * waiting list */
473                 LDLM_LOCK_GET(lock);
474         }
475         cfs_spin_unlock_bh(&waiting_locks_spinlock);
476
477         if (ret) {
478                 cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
479                 if (cfs_list_empty(&lock->l_exp_list))
480                         cfs_list_add(&lock->l_exp_list,
481                                      &lock->l_export->exp_bl_list);
482                 cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
483         }
484
485         LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
486                    ret == 0 ? "not re-" : "", timeout,
487                    AT_OFF ? "off" : "on");
488         return ret;
489 }
490
491 /*
492  * Remove a lock from the pending list, likely because it had its cancellation
493  * callback arrive without incident.  This adjusts the lock-timeout timer if
494  * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
495  * As done by ldlm_del_waiting_lock(), the caller must release the lock
496  * reference when the lock is removed from any list (1 is returned).
497  *
498  * Called with namespace lock held.
499  */
500 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
501 {
502         cfs_list_t *list_next;
503
504         if (cfs_list_empty(&lock->l_pending_chain))
505                 return 0;
506
507         list_next = lock->l_pending_chain.next;
508         if (lock->l_pending_chain.prev == &waiting_locks_list) {
509                 /* Removing the head of the list, adjust timer. */
510                 if (list_next == &waiting_locks_list) {
511                         /* No more, just cancel. */
512                         cfs_timer_disarm(&waiting_locks_timer);
513                 } else {
514                         struct ldlm_lock *next;
515                         next = cfs_list_entry(list_next, struct ldlm_lock,
516                                               l_pending_chain);
517                         cfs_timer_arm(&waiting_locks_timer,
518                                       round_timeout(next->l_callback_timeout));
519                 }
520         }
521         cfs_list_del_init(&lock->l_pending_chain);
522
523         return 1;
524 }
525
526 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
527 {
528         int ret;
529
530         if (lock->l_export == NULL) {
531                 /* We don't have a "waiting locks list" on clients. */
532                 CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
533                 return 0;
534         }
535
536         cfs_spin_lock_bh(&waiting_locks_spinlock);
537         ret = __ldlm_del_waiting_lock(lock);
538         cfs_spin_unlock_bh(&waiting_locks_spinlock);
539
540         /* remove the lock from the export's blocking list */
541         cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
542         cfs_list_del_init(&lock->l_exp_list);
543         cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
544
545         if (ret) {
546                 /* release lock ref if it has indeed been removed
547                  * from a list */
548                 LDLM_LOCK_RELEASE(lock);
549         }
550
551         LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
552         return ret;
553 }
554
555 /*
556  * Prolong the lock
557  *
558  * Called with namespace lock held.
559  */
560 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
561 {
562         if (lock->l_export == NULL) {
563                 /* We don't have a "waiting locks list" on clients. */
564                 LDLM_DEBUG(lock, "client lock: no-op");
565                 return 0;
566         }
567
568         cfs_spin_lock_bh(&waiting_locks_spinlock);
569
570         if (cfs_list_empty(&lock->l_pending_chain)) {
571                 cfs_spin_unlock_bh(&waiting_locks_spinlock);
572                 LDLM_DEBUG(lock, "wasn't waiting");
573                 return 0;
574         }
575
576         /* we remove/add the lock to the waiting list, so there is no need
577          * to release/take a lock reference */
578         __ldlm_del_waiting_lock(lock);
579         __ldlm_add_waiting_lock(lock, timeout);
580         cfs_spin_unlock_bh(&waiting_locks_spinlock);
581
582         LDLM_DEBUG(lock, "refreshed");
583         return 1;
584 }
585
586 #else /* !HAVE_SERVER_SUPPORT ||  !__KERNEL__ */
587
588 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
589 {
590         RETURN(0);
591 }
592
593 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
594 {
595         RETURN(0);
596 }
597
598 # ifdef HAVE_SERVER_SUPPORT
599 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
600 {
601         LASSERT(lock->l_res_locked);
602         LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
603         RETURN(1);
604 }
605
606 # endif
607 #endif /* HAVE_SERVER_SUPPORT && __KERNEL__ */
608
609 #ifdef HAVE_SERVER_SUPPORT
610
611 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
612                             const char *ast_type)
613 {
614         LCONSOLE_ERROR_MSG(0x138, "%s: A client on nid %s was evicted due "
615                            "to a lock %s callback time out: rc %d\n",
616                            lock->l_export->exp_obd->obd_name,
617                            obd_export_nid2str(lock->l_export), ast_type, rc);
618
619         if (obd_dump_on_timeout)
620                 libcfs_debug_dumplog();
621 #ifdef __KERNEL__
622         cfs_spin_lock_bh(&waiting_locks_spinlock);
623         if (__ldlm_del_waiting_lock(lock) == 0)
624                 /* the lock was not in any list, grab an extra ref before adding
625                  * the lock to the expired list */
626                 LDLM_LOCK_GET(lock);
627         cfs_list_add(&lock->l_pending_chain,
628                      &expired_lock_thread.elt_expired_locks);
629         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
630         cfs_spin_unlock_bh(&waiting_locks_spinlock);
631 #else
632         class_fail_export(lock->l_export);
633 #endif
634 }
635
636 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
637                                  struct ptlrpc_request *req, int rc,
638                                  const char *ast_type)
639 {
640         lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
641
642         if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
643                 LASSERT(lock->l_export);
644                 if (lock->l_export->exp_libclient) {
645                         LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
646                                    " timeout, just cancelling lock", ast_type,
647                                    libcfs_nid2str(peer.nid));
648                         ldlm_lock_cancel(lock);
649                         rc = -ERESTART;
650                 } else if (lock->l_flags & LDLM_FL_CANCEL) {
651                         LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
652                                    "cancel was received (AST reply lost?)",
653                                    ast_type, libcfs_nid2str(peer.nid));
654                         ldlm_lock_cancel(lock);
655                         rc = -ERESTART;
656                 } else {
657                         ldlm_del_waiting_lock(lock);
658                         ldlm_failed_ast(lock, rc, ast_type);
659                 }
660         } else if (rc) {
661                 if (rc == -EINVAL) {
662                         struct ldlm_resource *res = lock->l_resource;
663                         LDLM_DEBUG(lock, "client (nid %s) returned %d"
664                                " from %s AST - normal race",
665                                libcfs_nid2str(peer.nid),
666                                req->rq_repmsg ?
667                                lustre_msg_get_status(req->rq_repmsg) : -1,
668                                ast_type);
669                         if (res) {
670                                 /* update lvbo to return proper attributes.
671                                  * see bug 23174 */
672                                 ldlm_resource_getref(res);
673                                 ldlm_res_lvbo_update(res, NULL, 1);
674                                 ldlm_resource_putref(res);
675                         }
676
677                 } else {
678                         LDLM_ERROR(lock, "client (nid %s) returned %d "
679                                    "from %s AST", libcfs_nid2str(peer.nid),
680                                    (req->rq_repmsg != NULL) ?
681                                    lustre_msg_get_status(req->rq_repmsg) : 0,
682                                    ast_type);
683                 }
684                 ldlm_lock_cancel(lock);
685                 /* Server-side AST functions are called from ldlm_reprocess_all,
686                  * which needs to be told to please restart its reprocessing. */
687                 rc = -ERESTART;
688         }
689
690         return rc;
691 }
692
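/* Reply interpreter shared by blocking, completion and glimpse AST RPCs:
 * refreshes the LVB on glimpse replies, routes failures through
 * ldlm_handle_ast_error(), drops the lock reference taken in
 * ldlm_ast_fini() and counts -ERESTART results in arg->restart. */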
693 static int ldlm_cb_interpret(const struct lu_env *env,
694                              struct ptlrpc_request *req, void *data, int rc)
695 {
696         struct ldlm_cb_async_args *ca   = data;
697         struct ldlm_lock          *lock = ca->ca_lock;
698         struct ldlm_cb_set_arg    *arg  = ca->ca_set_arg;
699         ENTRY;
700
701         LASSERT(lock != NULL);
702
703         switch (arg->type) {
704         case LDLM_GL_CALLBACK:
705                 /* Update the LVB from disk if the AST failed
706                  * (this is a legal race)
707                  *
708                  * - Glimpse callback of local lock just returns
709                  *   -ELDLM_NO_LOCK_DATA.
710                  * - Glimpse callback of remote lock might return
711                  *   -ELDLM_NO_LOCK_DATA when inode is cleared. LU-274
712                  */
713                 if (rc == -ELDLM_NO_LOCK_DATA) {
714                         LDLM_DEBUG(lock, "lost race - client has a lock but no "
715                                    "inode");
716                         ldlm_res_lvbo_update(lock->l_resource, NULL, 1);
717                 } else if (rc != 0) {
718                         rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
719                 } else {
720                         rc = ldlm_res_lvbo_update(lock->l_resource, req, 1);
721                 }
722                 break;
723         case LDLM_BL_CALLBACK:
724                 if (rc != 0)
725                         rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
726                 break;
727         case LDLM_CP_CALLBACK:
728                 if (rc != 0)
729                         rc = ldlm_handle_ast_error(lock, req, rc, "completion");
730                 break;
731         default:
732                 LDLM_ERROR(lock, "invalid opcode for lock callback %d",
733                            arg->type);
734                 LBUG();
735         }
736
737         /* release extra reference taken in ldlm_ast_fini() */
738         LDLM_LOCK_RELEASE(lock);
739
740         if (rc == -ERESTART)
741                 cfs_atomic_inc(&arg->restart);
742
743         RETURN(0);
744 }
745
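/* Finish issuing an AST RPC: for instant-cancel locks send it right away
 * (counting a restart on success), otherwise take an extra lock reference
 * and queue the request on the shared set in arg->set. */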
746 static inline int ldlm_ast_fini(struct ptlrpc_request *req,
747                                 struct ldlm_cb_set_arg *arg,
748                                 struct ldlm_lock *lock,
749                                 int instant_cancel)
750 {
751         int rc = 0;
752         ENTRY;
753
754         if (unlikely(instant_cancel)) {
755                 rc = ptl_send_rpc(req, 1);
756                 ptlrpc_req_finished(req);
757                 if (rc == 0)
758                         cfs_atomic_inc(&arg->restart);
759         } else {
760                 LDLM_LOCK_GET(lock);
761                 ptlrpc_set_add_req(arg->set, req);
762         }
763
764         RETURN(rc);
765 }
766
767 /**
768  * Check if there are requests in the export request list which prevent
769  * the lock canceling and make these requests high priority ones.
770  */
771 static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
772 {
773         struct ptlrpc_request *req;
774         ENTRY;
775
776         if (lock->l_export == NULL) {
777                 LDLM_DEBUG(lock, "client lock: no-op");
778                 RETURN_EXIT;
779         }
780
781         cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
782         cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
783                                 rq_exp_list) {
784                 /* Do not process requests that were not yet added to the
785                  * incoming queue or were already removed from it for
786                  * processing */
787                 if (!req->rq_hp && !cfs_list_empty(&req->rq_list) &&
788                     req->rq_ops->hpreq_lock_match &&
789                     req->rq_ops->hpreq_lock_match(req, lock))
790                         ptlrpc_hpreq_reorder(req);
791         }
792         cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
793         EXIT;
794 }
795
796 /*
797  * ->l_blocking_ast() method for server-side locks. This is invoked when a newly
798  * enqueued server lock conflicts with the given one.
799  *
800  * Sends a blocking AST RPC to the client owning that lock; arms the timeout
801  * timer to wait for the client's response.
802  */
803 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
804                              struct ldlm_lock_desc *desc,
805                              void *data, int flag)
806 {
807         struct ldlm_cb_async_args *ca;
808         struct ldlm_cb_set_arg *arg = data;
809         struct ldlm_request    *body;
810         struct ptlrpc_request  *req;
811         int                     instant_cancel = 0;
812         int                     rc = 0;
813         ENTRY;
814
815         if (flag == LDLM_CB_CANCELING)
816                 /* Don't need to do anything here. */
817                 RETURN(0);
818
819         LASSERT(lock);
820         LASSERT(data != NULL);
821         if (lock->l_export->exp_obd->obd_recovering != 0)
822                 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
823
824         ldlm_lock_reorder_req(lock);
825
826         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
827                                         &RQF_LDLM_BL_CALLBACK,
828                                         LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
829         if (req == NULL)
830                 RETURN(-ENOMEM);
831
832         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
833         ca = ptlrpc_req_async_args(req);
834         ca->ca_set_arg = arg;
835         ca->ca_lock = lock;
836
837         req->rq_interpret_reply = ldlm_cb_interpret;
838         req->rq_no_resend = 1;
839
840         lock_res_and_lock(lock);
841         if (lock->l_granted_mode != lock->l_req_mode) {
842                 /* this blocking AST will be communicated as part of the
843                  * completion AST instead */
844                 unlock_res_and_lock(lock);
845
846                 ptlrpc_req_finished(req);
847                 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
848                 RETURN(0);
849         }
850
851         if (lock->l_destroyed) {
852                 /* What's the point? */
853                 unlock_res_and_lock(lock);
854                 ptlrpc_req_finished(req);
855                 RETURN(0);
856         }
857
858         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
859                 instant_cancel = 1;
860
861         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
862         body->lock_handle[0] = lock->l_remote_handle;
863         body->lock_desc = *desc;
864         body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
865
866         LDLM_DEBUG(lock, "server preparing blocking AST");
867
868         ptlrpc_request_set_replen(req);
869         if (instant_cancel) {
870                 unlock_res_and_lock(lock);
871                 ldlm_lock_cancel(lock);
872         } else {
873                 LASSERT(lock->l_granted_mode == lock->l_req_mode);
874                 ldlm_add_waiting_lock(lock);
875                 unlock_res_and_lock(lock);
876         }
877
878         req->rq_send_state = LUSTRE_IMP_FULL;
879         /* ptlrpc_request_alloc_pack already set timeout */
880         if (AT_OFF)
881                 req->rq_timeout = ldlm_get_rq_timeout();
882
883         if (lock->l_export && lock->l_export->exp_nid_stats &&
884             lock->l_export->exp_nid_stats->nid_ldlm_stats)
885                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
886                                      LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
887
888         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
889
890         RETURN(rc);
891 }
892
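/* ->l_completion_ast() for server-side locks: sends a completion AST RPC
 * carrying the current LVB to the client, feeds the measured enqueue wait
 * into the adaptive timeout estimate and, if a blocking AST was already
 * sent, either cancels the lock (cancel-on-block) or starts its callback
 * timer. */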
893 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
894 {
895         struct ldlm_cb_set_arg *arg = data;
896         struct ldlm_request    *body;
897         struct ptlrpc_request  *req;
898         struct ldlm_cb_async_args *ca;
899         long                    total_enqueue_wait;
900         int                     instant_cancel = 0;
901         int                     rc = 0;
902         ENTRY;
903
904         LASSERT(lock != NULL);
905         LASSERT(data != NULL);
906
907         total_enqueue_wait = cfs_time_sub(cfs_time_current_sec(),
908                                           lock->l_last_activity);
909
910         req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
911                                     &RQF_LDLM_CP_CALLBACK);
912         if (req == NULL)
913                 RETURN(-ENOMEM);
914
915         /* server namespace, doesn't need lock */
916         if (lock->l_resource->lr_lvb_len) {
917                  req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
918                                       lock->l_resource->lr_lvb_len);
919         }
920
921         rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
922         if (rc) {
923                 ptlrpc_request_free(req);
924                 RETURN(rc);
925         }
926
927         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
928         ca = ptlrpc_req_async_args(req);
929         ca->ca_set_arg = arg;
930         ca->ca_lock = lock;
931
932         req->rq_interpret_reply = ldlm_cb_interpret;
933         req->rq_no_resend = 1;
934         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
935
936         body->lock_handle[0] = lock->l_remote_handle;
937         body->lock_flags = flags;
938         ldlm_lock2desc(lock, &body->lock_desc);
939         if (lock->l_resource->lr_lvb_len) {
940                 void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
941
942                 lock_res(lock->l_resource);
943                 memcpy(lvb, lock->l_resource->lr_lvb_data,
944                        lock->l_resource->lr_lvb_len);
945                 unlock_res(lock->l_resource);
946         }
947
948         LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
949                    total_enqueue_wait);
950
951         /* Server-side enqueue wait time estimate, used in
952             __ldlm_add_waiting_lock to set future enqueue timers */
953         if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
954                 at_measured(ldlm_lock_to_ns_at(lock),
955                             total_enqueue_wait);
956         else
957                 /* bz18618. Don't add the lock enqueue time we spent waiting for a
958                    previous callback to fail. Locks waiting legitimately will
959                    get extended by ldlm_refresh_waiting_lock regardless of the
960                    estimate, so it's okay to underestimate here. */
961                 LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
962                        "It is likely that a previous callback timed out.",
963                        total_enqueue_wait,
964                        at_get(ldlm_lock_to_ns_at(lock)));
965
966         ptlrpc_request_set_replen(req);
967
968         req->rq_send_state = LUSTRE_IMP_FULL;
969         /* ptlrpc_request_pack already set timeout */
970         if (AT_OFF)
971                 req->rq_timeout = ldlm_get_rq_timeout();
972
973         /* We only send real blocking ASTs after the lock is granted */
974         lock_res_and_lock(lock);
975         if (lock->l_flags & LDLM_FL_AST_SENT) {
976                 body->lock_flags |= LDLM_FL_AST_SENT;
977                 /* copy ast flags like LDLM_FL_DISCARD_DATA */
978                 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
979
980                 /* We might get here before ldlm_handle_enqueue has set the
981                  * LDLM_FL_CANCEL_ON_BLOCK flag. In that case we will put this
982                  * lock on the waiting list, but this is safe: the matching code
983                  * in ldlm_handle_enqueue will still call ldlm_lock_cancel(),
984                  * which not only cancels the lock but also removes it from the
985                  * waiting list */
986                 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
987                         unlock_res_and_lock(lock);
988                         ldlm_lock_cancel(lock);
989                         instant_cancel = 1;
990                         lock_res_and_lock(lock);
991                 } else {
992                         /* start the lock-timeout clock */
993                         ldlm_add_waiting_lock(lock);
994                 }
995         }
996         unlock_res_and_lock(lock);
997
998         if (lock->l_export && lock->l_export->exp_nid_stats &&
999             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1000                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1001                                      LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
1002
1003         rc = ldlm_ast_fini(req, arg, lock, instant_cancel);
1004
1005         RETURN(rc);
1006 }
1007
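/* ->l_glimpse_ast() for server-side locks: sends a glimpse AST RPC to the
 * client holding the lock and reserves reply space for the returned LVB. */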
1008 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
1009 {
1010         struct ldlm_cb_set_arg          *arg = data;
1011         struct ldlm_request             *body;
1012         struct ptlrpc_request           *req;
1013         struct ldlm_cb_async_args       *ca;
1014         int                              rc;
1015         ENTRY;
1016
1017         LASSERT(lock != NULL);
1018
1019         req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
1020                                         &RQF_LDLM_GL_CALLBACK,
1021                                         LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK);
1022
1023         if (req == NULL)
1024                 RETURN(-ENOMEM);
1025
1026         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1027         body->lock_handle[0] = lock->l_remote_handle;
1028         ldlm_lock2desc(lock, &body->lock_desc);
1029
1030         CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
1031         ca = ptlrpc_req_async_args(req);
1032         ca->ca_set_arg = arg;
1033         ca->ca_lock = lock;
1034
1035         /* server namespace, doesn't need lock */
1036         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1037                              lock->l_resource->lr_lvb_len);
1038         ptlrpc_request_set_replen(req);
1039
1040         req->rq_send_state = LUSTRE_IMP_FULL;
1041         /* ptlrpc_request_alloc_pack already set timeout */
1042         if (AT_OFF)
1043                 req->rq_timeout = ldlm_get_rq_timeout();
1044
1045         req->rq_interpret_reply = ldlm_cb_interpret;
1046
1047         if (lock->l_export && lock->l_export->exp_nid_stats &&
1048             lock->l_export->exp_nid_stats->nid_ldlm_stats)
1049                 lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
1050                                      LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
1051
1052         rc = ldlm_ast_fini(req, arg, lock, 0);
1053
1054         RETURN(rc);
1055 }
1056
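/* Send the glimpse ASTs queued on gl_work_list for resource res and
 * reprocess the resource if any of them requested a restart. */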
1057 int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list)
1058 {
1059         int     rc;
1060         ENTRY;
1061
1062         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), gl_work_list,
1063                                LDLM_WORK_GL_AST);
1064         if (rc == -ERESTART)
1065                 ldlm_reprocess_all(res);
1066
1067         RETURN(rc);
1068 }
1069
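/* Increment the per-service enqueue statistics counter that matches the
 * lock type of the incoming request; extent enqueues with
 * LDLM_FL_HAS_INTENT are counted as glimpse enqueues. */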
1070 static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
1071                        struct lprocfs_stats *srv_stats)
1072 {
1073         int lock_type = 0, op = 0;
1074
1075         lock_type = dlm_req->lock_desc.l_resource.lr_type;
1076
1077         switch (lock_type) {
1078         case LDLM_PLAIN:
1079                 op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
1080                 break;
1081         case LDLM_EXTENT:
1082                 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
1083                         op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
1084                 else
1085                         op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
1086                 break;
1087         case LDLM_FLOCK:
1088                 op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
1089                 break;
1090         case LDLM_IBITS:
1091                 op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
1092                 break;
1093         default:
1094                 op = 0;
1095                 break;
1096         }
1097
1098         if (op)
1099                 lprocfs_counter_incr(srv_stats, op);
1100
1101         return;
1102 }
1103
1104 /*
1105  * Main server-side entry point into LDLM. This is called by ptlrpc service
1106  * threads to carry out client lock enqueueing requests.
1107  */
1108 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
1109                          struct ptlrpc_request *req,
1110                          const struct ldlm_request *dlm_req,
1111                          const struct ldlm_callback_suite *cbs)
1112 {
1113         struct ldlm_reply *dlm_rep;
1114         __u32 flags;
1115         ldlm_error_t err = ELDLM_OK;
1116         struct ldlm_lock *lock = NULL;
1117         void *cookie = NULL;
1118         int rc = 0;
1119         ENTRY;
1120
1121         LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
1122
1123         ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
1124         flags = dlm_req->lock_flags;
1125
1126         LASSERT(req->rq_export);
1127
1128         if (ptlrpc_req2svc(req)->srv_stats != NULL)
1129                 ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);
1130
1131         if (req->rq_export && req->rq_export->exp_nid_stats &&
1132             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1133                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1134                                      LDLM_ENQUEUE - LDLM_FIRST_OPC);
1135
1136         if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
1137                      dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
1138                 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
1139                           dlm_req->lock_desc.l_resource.lr_type);
1140                 GOTO(out, rc = -EFAULT);
1141         }
1142
1143         if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
1144                      dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
1145                      dlm_req->lock_desc.l_req_mode &
1146                      (dlm_req->lock_desc.l_req_mode-1))) {
1147                 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
1148                           dlm_req->lock_desc.l_req_mode);
1149                 GOTO(out, rc = -EFAULT);
1150         }
1151
1152         if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
1153                 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
1154                              LDLM_PLAIN)) {
1155                         DEBUG_REQ(D_ERROR, req,
1156                                   "PLAIN lock request from IBITS client?");
1157                         GOTO(out, rc = -EPROTO);
1158                 }
1159         } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
1160                             LDLM_IBITS)) {
1161                 DEBUG_REQ(D_ERROR, req,
1162                           "IBITS lock request from unaware client?");
1163                 GOTO(out, rc = -EPROTO);
1164         }
1165
1166 #if 0
1167         /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
1168            against server's _CONNECT_SUPPORTED flags? (I don't want to use
1169            ibits for mgc/mgs) */
1170
1171         /* INODEBITS_INTEROP: Perform conversion from plain lock to
1172          * inodebits lock if client does not support them. */
1173         if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
1174             (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
1175                 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
1176                 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
1177                         MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
1178                 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
1179                         dlm_req->lock_desc.l_req_mode = LCK_CR;
1180         }
1181 #endif
1182
1183         if (unlikely(flags & LDLM_FL_REPLAY)) {
1184                 /* Find an existing lock in the per-export lock hash */
1185                 /* In the function below, .hs_keycmp resolves to
1186                  * ldlm_export_lock_keycmp() */
1187                 /* coverity[overrun-buffer-val] */
1188                 lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
1189                                        (void *)&dlm_req->lock_handle[0]);
1190                 if (lock != NULL) {
1191                         DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
1192                                   LPX64, lock->l_handle.h_cookie);
1193                         GOTO(existing_lock, rc = 0);
1194                 }
1195         }
1196
1197         /* The lock's callback data might be set in the policy function */
1198         lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
1199                                 dlm_req->lock_desc.l_resource.lr_type,
1200                                 dlm_req->lock_desc.l_req_mode,
1201                                 cbs, NULL, 0);
1202
1203         if (!lock)
1204                 GOTO(out, rc = -ENOMEM);
1205
1206         lock->l_last_activity = cfs_time_current_sec();
1207         lock->l_remote_handle = dlm_req->lock_handle[0];
1208         LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
1209
1210         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
1211         /* Don't enqueue a lock onto the export if it has been disconnected
1212          * due to eviction (bug 3822) or server umount (bug 24324).
1213          * Cancel it now instead. */
1214         if (req->rq_export->exp_disconnected) {
1215                 LDLM_ERROR(lock, "lock on disconnected export %p",
1216                            req->rq_export);
1217                 GOTO(out, rc = -ENOTCONN);
1218         }
1219
1220         lock->l_export = class_export_lock_get(req->rq_export, lock);
1221         if (lock->l_export->exp_lock_hash)
1222                 cfs_hash_add(lock->l_export->exp_lock_hash,
1223                              &lock->l_remote_handle,
1224                              &lock->l_exp_hash);
1225
1226 existing_lock:
1227
1228         if (flags & LDLM_FL_HAS_INTENT) {
1229                 /* In this case, the reply buffer is allocated deep in
1230                  * local_lock_enqueue by the policy function. */
1231                 cookie = req;
1232         } else {
1233                 /* based on the assumption that the LVB size never changes
1234                  * during the resource lifetime; otherwise it would need
1235                  * resource->lr_lock's protection */
1236                 if (lock->l_resource->lr_lvb_len) {
1237                         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
1238                                              RCL_SERVER,
1239                                              lock->l_resource->lr_lvb_len);
1240                 }
1241
1242                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
1243                         GOTO(out, rc = -ENOMEM);
1244
1245                 rc = req_capsule_server_pack(&req->rq_pill);
1246                 if (rc)
1247                         GOTO(out, rc);
1248         }
1249
1250         if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
1251                 ldlm_convert_policy_to_local(req->rq_export,
1252                                           dlm_req->lock_desc.l_resource.lr_type,
1253                                           &dlm_req->lock_desc.l_policy_data,
1254                                           &lock->l_policy_data);
1255         if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
1256                 lock->l_req_extent = lock->l_policy_data.l_extent;
1257
1258         err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
1259         if (err)
1260                 GOTO(out, err);
1261
1262         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1263         dlm_rep->lock_flags = flags;
1264
1265         ldlm_lock2desc(lock, &dlm_rep->lock_desc);
1266         ldlm_lock2handle(lock, &dlm_rep->lock_handle);
1267
1268         /* We never send a blocking AST until the lock is granted, but
1269          * we can tell it right now */
1270         lock_res_and_lock(lock);
1271
1272         /* Now take into account flags to be inherited from original lock
1273            request both in reply to client and in our own lock flags. */
1274         dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
1275         lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
1276
1277         /* Don't move a pending lock onto the export if it has already been
1278          * disconnected due to eviction (bug 5683) or server umount (bug 24324).
1279          * Cancel it now instead. */
1280         if (unlikely(req->rq_export->exp_disconnected ||
1281                      OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
1282                 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
1283                 rc = -ENOTCONN;
1284         } else if (lock->l_flags & LDLM_FL_AST_SENT) {
1285                 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
1286                 if (lock->l_granted_mode == lock->l_req_mode) {
1287                         /*
1288                          * Only cancel the lock if it was granted, because
1289                          * otherwise it would be destroyed immediately and never
1290                          * granted in the future, causing timeouts on the client.
1291                          * A lock that was not granted will be cancelled
1292                          * immediately after the completion AST is sent.
1293                          */
1294                         if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
1295                                 unlock_res_and_lock(lock);
1296                                 ldlm_lock_cancel(lock);
1297                                 lock_res_and_lock(lock);
1298                         } else
1299                                 ldlm_add_waiting_lock(lock);
1300                 }
1301         }
1302         /* Make sure we never ever grant usual metadata locks to liblustre
1303            clients */
1304         if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
1305             dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
1306              req->rq_export->exp_libclient) {
1307                 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
1308                              !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
1309                         CERROR("Granting sync lock to libclient. "
1310                                "req fl %d, rep fl %d, lock fl "LPX64"\n",
1311                                dlm_req->lock_flags, dlm_rep->lock_flags,
1312                                lock->l_flags);
1313                         LDLM_ERROR(lock, "sync lock");
1314                         if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
1315                                 struct ldlm_intent *it;
1316
1317                                 it = req_capsule_client_get(&req->rq_pill,
1318                                                             &RMF_LDLM_INTENT);
1319                                 if (it != NULL) {
1320                                         CERROR("This is intent %s ("LPU64")\n",
1321                                                ldlm_it2str(it->opc), it->opc);
1322                                 }
1323                         }
1324                 }
1325         }
1326
1327         unlock_res_and_lock(lock);
1328
1329         EXIT;
1330  out:
1331         req->rq_status = rc ?: err; /* return whichever error occurred - bug 11190 */
1332         if (!req->rq_packed_final) {
1333                 err = lustre_pack_reply(req, 1, NULL, NULL);
1334                 if (rc == 0)
1335                         rc = err;
1336         }
1337
1338         /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1339          * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
1340         if (lock) {
1341                 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
1342                            " (err=%d, rc=%d)", err, rc);
1343
1344                 if (rc == 0) {
1345                         if (lock->l_resource->lr_lvb_len > 0) {
1346                                 /* The MDT path does not handle lr_lvb_data,
1347                                  * so the lock/unlock must stay inside this
1348                                  * if block */
1349                                 void *lvb;
1350
1351                                 lvb = req_capsule_server_get(&req->rq_pill,
1352                                                              &RMF_DLM_LVB);
1353                                 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1354                                          req, lock);
1355                                 lock_res(lock->l_resource);
1356                                 memcpy(lvb, lock->l_resource->lr_lvb_data,
1357                                        lock->l_resource->lr_lvb_len);
1358                                 unlock_res(lock->l_resource);
1359                         }
1360                 } else {
1361                         lock_res_and_lock(lock);
1362                         ldlm_resource_unlink_lock(lock);
1363                         ldlm_lock_destroy_nolock(lock);
1364                         unlock_res_and_lock(lock);
1365                 }
1366
1367                 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1368                         ldlm_reprocess_all(lock->l_resource);
1369
1370                 LDLM_LOCK_RELEASE(lock);
1371         }
1372
1373         LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1374                           lock, rc);
1375
1376         return rc;
1377 }
1378
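/* Thin wrapper around ldlm_handle_enqueue0(): bundle the caller-supplied
 * completion/blocking/glimpse callbacks into an ldlm_callback_suite and
 * dispatch the unpacked enqueue request on the export's namespace. */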
1379 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1380                         ldlm_completion_callback completion_callback,
1381                         ldlm_blocking_callback blocking_callback,
1382                         ldlm_glimpse_callback glimpse_callback)
1383 {
1384         struct ldlm_request *dlm_req;
1385         struct ldlm_callback_suite cbs = {
1386                 .lcs_completion = completion_callback,
1387                 .lcs_blocking   = blocking_callback,
1388                 .lcs_glimpse    = glimpse_callback
1389         };
1390         int rc;
1391
1392         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1393         if (dlm_req != NULL) {
1394                 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1395                                           req, dlm_req, &cbs);
1396         } else {
1397                 rc = -EFAULT;
1398         }
1399         return rc;
1400 }
1401
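/* Server-side handler for a lock convert request: look up the lock by
 * handle, try ldlm_lock_convert() to the requested mode and, on success,
 * reprocess the resource so other waiters can be granted. */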
1402 int ldlm_handle_convert0(struct ptlrpc_request *req,
1403                          const struct ldlm_request *dlm_req)
1404 {
1405         struct ldlm_reply *dlm_rep;
1406         struct ldlm_lock *lock;
1407         int rc;
1408         ENTRY;
1409
1410         if (req->rq_export && req->rq_export->exp_nid_stats &&
1411             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1412                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1413                                      LDLM_CONVERT - LDLM_FIRST_OPC);
1414
1415         rc = req_capsule_server_pack(&req->rq_pill);
1416         if (rc)
1417                 RETURN(rc);
1418
1419         dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1420         dlm_rep->lock_flags = dlm_req->lock_flags;
1421
1422         lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1423         if (!lock) {
1424                 req->rq_status = EINVAL;
1425         } else {
1426                 void *res = NULL;
1427
1428                 LDLM_DEBUG(lock, "server-side convert handler START");
1429
1430                 lock->l_last_activity = cfs_time_current_sec();
1431                 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1432                                         &dlm_rep->lock_flags);
1433                 if (res) {
1434                         if (ldlm_del_waiting_lock(lock))
1435                                 LDLM_DEBUG(lock, "converted waiting lock");
1436                         req->rq_status = 0;
1437                 } else {
1438                         req->rq_status = EDEADLOCK;
1439                 }
1440         }
1441
1442         if (lock) {
1443                 if (!req->rq_status)
1444                         ldlm_reprocess_all(lock->l_resource);
1445                 LDLM_DEBUG(lock, "server-side convert handler END");
1446                 LDLM_LOCK_PUT(lock);
1447         } else
1448                 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1449
1450         RETURN(0);
1451 }
1452
1453 int ldlm_handle_convert(struct ptlrpc_request *req)
1454 {
1455         int rc;
1456         struct ldlm_request *dlm_req;
1457
1458         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1459         if (dlm_req != NULL) {
1460                 rc = ldlm_handle_convert0(req, dlm_req);
1461         } else {
1462                 CERROR("Can't unpack dlm_req\n");
1463                 rc = -EFAULT;
1464         }
1465         return rc;
1466 }
1467
1468 /* Cancel all the locks whose handles are packed into ldlm_request */
1469 int ldlm_request_cancel(struct ptlrpc_request *req,
1470                         const struct ldlm_request *dlm_req, int first)
1471 {
1472         struct ldlm_resource *res, *pres = NULL;
1473         struct ldlm_lock *lock;
1474         int i, count, done = 0;
1475         ENTRY;
1476
1477         count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1478         if (first >= count)
1479                 RETURN(0);
1480
1481         /* There are no locks on the server at replay time, so skip
1482          * lock cancelling to let the replay tests pass. */
1483         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1484                 RETURN(0);
1485
1486         LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1487                           "starting at %d", count, first);
1488
1489         for (i = first; i < count; i++) {
1490                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1491                 if (!lock) {
1492                         LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1493                                           "lock (cookie "LPU64")",
1494                                           dlm_req->lock_handle[i].cookie);
1495                         continue;
1496                 }
1497
1498                 res = lock->l_resource;
1499                 done++;
1500
1501                 if (res != pres) {
1502                         if (pres != NULL) {
1503                                 ldlm_reprocess_all(pres);
1504                                 LDLM_RESOURCE_DELREF(pres);
1505                                 ldlm_resource_putref(pres);
1506                         }
1507                         if (res != NULL) {
1508                                 ldlm_resource_getref(res);
1509                                 LDLM_RESOURCE_ADDREF(res);
1510                                 ldlm_res_lvbo_update(res, NULL, 1);
1511                         }
1512                         pres = res;
1513                 }
1514                 ldlm_lock_cancel(lock);
1515                 LDLM_LOCK_PUT(lock);
1516         }
1517         if (pres != NULL) {
1518                 ldlm_reprocess_all(pres);
1519                 LDLM_RESOURCE_DELREF(pres);
1520                 ldlm_resource_putref(pres);
1521         }
1522         LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1523         RETURN(done);
1524 }
1525
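/* Server-side entry point for an LDLM_CANCEL RPC: unpack the request,
 * account it in the per-NID stats, pack the reply and cancel every lock
 * handle carried in the request via ldlm_request_cancel(). */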
1526 int ldlm_handle_cancel(struct ptlrpc_request *req)
1527 {
1528         struct ldlm_request *dlm_req;
1529         int rc;
1530         ENTRY;
1531
1532         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1533         if (dlm_req == NULL) {
1534                 CDEBUG(D_INFO, "bad request buffer for cancel\n");
1535                 RETURN(-EFAULT);
1536         }
1537
1538         if (req->rq_export && req->rq_export->exp_nid_stats &&
1539             req->rq_export->exp_nid_stats->nid_ldlm_stats)
1540                 lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
1541                                      LDLM_CANCEL - LDLM_FIRST_OPC);
1542
1543         rc = req_capsule_server_pack(&req->rq_pill);
1544         if (rc)
1545                 RETURN(rc);
1546
1547         if (!ldlm_request_cancel(req, dlm_req, 0))
1548                 req->rq_status = ESTALE;
1549
1550         RETURN(ptlrpc_reply(req));
1551 }
1552 #endif /* HAVE_SERVER_SUPPORT */
1553
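/* Client-side handler for a blocking AST: mark the lock CBPENDING (and
 * CANCEL for cancel-on-block locks) and, if it is no longer referenced
 * locally, run its blocking callback immediately; otherwise it will be
 * cancelled later, once the last reader/writer reference is dropped. */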
1554 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1555                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1556 {
1557         int do_ast;
1558         ENTRY;
1559
1560         LDLM_DEBUG(lock, "client blocking AST callback handler");
1561
1562         lock_res_and_lock(lock);
1563         lock->l_flags |= LDLM_FL_CBPENDING;
1564
1565         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1566                 lock->l_flags |= LDLM_FL_CANCEL;
1567
1568         do_ast = (!lock->l_readers && !lock->l_writers);
1569         unlock_res_and_lock(lock);
1570
1571         if (do_ast) {
1572                 CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
1573                        lock, lock->l_blocking_ast);
1574                 if (lock->l_blocking_ast != NULL)
1575                         lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1576                                              LDLM_CB_BLOCKING);
1577         } else {
1578                 CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
1579                        lock);
1580         }
1581
1582         LDLM_DEBUG(lock, "client blocking callback handler END");
1583         LDLM_LOCK_RELEASE(lock);
1584         EXIT;
1585 }
1586
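/* Client-side handler for a completion AST: pick up the granted mode,
 * policy data, resource name and LVB sent by the server, grant the lock
 * locally and run the resulting AST work list. */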
1587 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1588                                     struct ldlm_namespace *ns,
1589                                     struct ldlm_request *dlm_req,
1590                                     struct ldlm_lock *lock)
1591 {
1592         int lvb_len;
1593         CFS_LIST_HEAD(ast_list);
1594         ENTRY;
1595
1596         LDLM_DEBUG(lock, "client completion callback handler START");
1597
1598         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
1599                 int to = cfs_time_seconds(1);
1600                 while (to > 0) {
1601                         cfs_schedule_timeout_and_set_state(
1602                                 CFS_TASK_INTERRUPTIBLE, to);
1603                         if (lock->l_granted_mode == lock->l_req_mode ||
1604                             lock->l_destroyed)
1605                                 break;
1606                 }
1607         }
1608
1609         lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
1610         if (lvb_len > 0) {
1611                 if (lock->l_lvb_len > 0) {
1612                         /* for extent lock, lvb contains ost_lvb{}. */
1613                         LASSERT(lock->l_lvb_data != NULL);
1614                         LASSERTF(lock->l_lvb_len == lvb_len,
1615                                 "preallocated %d, actual %d.\n",
1616                                 lock->l_lvb_len, lvb_len);
1617                 } else { /* for layout lock, lvb has variable length */
1618                         void *lvb_data;
1619
1620                         OBD_ALLOC(lvb_data, lvb_len);
1621                         if (lvb_data == NULL)
1622                                 LDLM_ERROR(lock, "no memory.\n");
1623
1624                         lock_res_and_lock(lock);
1625                         if (lvb_data == NULL) {
1626                                 lock->l_flags |= LDLM_FL_FAILED;
1627                         } else {
1628                                 LASSERT(lock->l_lvb_data == NULL);
1629                                 lock->l_lvb_data = lvb_data;
1630                                 lock->l_lvb_len = lvb_len;
1631                         }
1632                         unlock_res_and_lock(lock);
1633                 }
1634         }
1635
1636         lock_res_and_lock(lock);
1637         if (lock->l_destroyed ||
1638             lock->l_granted_mode == lock->l_req_mode) {
1639                 /* bug 11300: the lock has already been granted */
1640                 unlock_res_and_lock(lock);
1641                 LDLM_DEBUG(lock, "Double grant race happened");
1642                 LDLM_LOCK_RELEASE(lock);
1643                 EXIT;
1644                 return;
1645         }
1646
1647         /* If we receive the completion AST before the actual enqueue returned,
1648          * then we might need to switch lock modes, resources, or extents. */
1649         if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1650                 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1651                 LDLM_DEBUG(lock, "completion AST, new lock mode");
1652         }
1653
1654         if (lock->l_resource->lr_type != LDLM_PLAIN) {
1655                 ldlm_convert_policy_to_local(req->rq_export,
1656                                           dlm_req->lock_desc.l_resource.lr_type,
1657                                           &dlm_req->lock_desc.l_policy_data,
1658                                           &lock->l_policy_data);
1659                 LDLM_DEBUG(lock, "completion AST, new policy data");
1660         }
1661
1662         ldlm_resource_unlink_lock(lock);
1663         if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1664                    &lock->l_resource->lr_name,
1665                    sizeof(lock->l_resource->lr_name)) != 0) {
1666                 unlock_res_and_lock(lock);
1667                 if (ldlm_lock_change_resource(ns, lock,
1668                                 &dlm_req->lock_desc.l_resource.lr_name) != 0) {
1669                         LDLM_ERROR(lock, "Failed to allocate resource");
1670                         LDLM_LOCK_RELEASE(lock);
1671                         EXIT;
1672                         return;
1673                 }
1674                 LDLM_DEBUG(lock, "completion AST, new resource");
1675                 CERROR("change resource!\n");
1676                 lock_res_and_lock(lock);
1677         }
1678
1679         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1680                 /* BL_AST locks are not needed in the LRU.
1681                  * Let ldlm_cancel_lru() be fast. */
1682                 ldlm_lock_remove_from_lru(lock);
1683                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1684                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1685         }
1686
1687         if (lock->l_lvb_len) {
1688                 if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
1689                                          RCL_CLIENT) < lock->l_lvb_len) {
1690                         LDLM_ERROR(lock, "completion AST did not contain "
1691                                    "expected LVB!");
1692                 } else {
1693                         void *lvb = req_capsule_client_get(&req->rq_pill,
1694                                                            &RMF_DLM_LVB);
1695                         memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1696                 }
1697         }
1698
1699         ldlm_grant_lock(lock, &ast_list);
1700         unlock_res_and_lock(lock);
1701
1702         LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1703
1704         /* Let the enqueue path call osc_lock_upcall() and initialize
1705          * l_ast_data */
1706         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
1707
1708         ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
1709
1710         LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1711                           lock);
1712         LDLM_LOCK_RELEASE(lock);
1713         EXIT;
1714 }
1715
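/* Client-side handler for a glimpse AST: run the lock's glimpse callback
 * to reply with current attributes, then opportunistically hand an unused
 * PW lock that has been idle for more than 10 seconds to the blocking
 * threads for cancellation. */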
1716 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1717                                     struct ldlm_namespace *ns,
1718                                     struct ldlm_request *dlm_req,
1719                                     struct ldlm_lock *lock)
1720 {
1721         int rc = -ENOSYS;
1722         ENTRY;
1723
1724         LDLM_DEBUG(lock, "client glimpse AST callback handler");
1725
1726         if (lock->l_glimpse_ast != NULL)
1727                 rc = lock->l_glimpse_ast(lock, req);
1728
1729         if (req->rq_repmsg != NULL) {
1730                 ptlrpc_reply(req);
1731         } else {
1732                 req->rq_status = rc;
1733                 ptlrpc_error(req);
1734         }
1735
1736         lock_res_and_lock(lock);
1737         if (lock->l_granted_mode == LCK_PW &&
1738             !lock->l_readers && !lock->l_writers &&
1739             cfs_time_after(cfs_time_current(),
1740                            cfs_time_add(lock->l_last_used,
1741                                         cfs_time_seconds(10)))) {
1742                 unlock_res_and_lock(lock);
1743                 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1744                         ldlm_handle_bl_callback(ns, NULL, lock);
1745
1746                 EXIT;
1747                 return;
1748         }
1749         unlock_res_and_lock(lock);
1750         LDLM_LOCK_RELEASE(lock);
1751         EXIT;
1752 }
1753
1754 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1755 {
1756         if (req->rq_no_reply)
1757                 return 0;
1758
1759         req->rq_status = rc;
1760         if (!req->rq_packed_final) {
1761                 rc = lustre_pack_reply(req, 1, NULL, NULL);
1762                 if (rc)
1763                         return rc;
1764         }
1765         return ptlrpc_reply(req);
1766 }
1767
1768 #ifdef __KERNEL__
1769 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
1770 {
1771         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1772         ENTRY;
1773
1774         cfs_spin_lock(&blp->blp_lock);
1775         if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
1776                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
1777                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
1778         } else {
1779                 /* other blocking callbacks are added to the regular list */
1780                 cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1781         }
1782         cfs_spin_unlock(&blp->blp_lock);
1783
1784         cfs_waitq_signal(&blp->blp_waitq);
1785
1786         /* cannot use blwi->blwi_mode here, as blwi may already have been
1787            freed in LDLM_ASYNC mode */
1788         if (mode == LDLM_SYNC)
1789                 cfs_wait_for_completion(&blwi->blwi_comp);
1790
1791         RETURN(0);
1792 }
1793
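/* Fill in a blocking work item: either a single lock, or a list of locks
 * to cancel that is moved from the cancels list onto blwi_head. */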
1794 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
1795                              struct ldlm_namespace *ns,
1796                              struct ldlm_lock_desc *ld,
1797                              cfs_list_t *cancels, int count,
1798                              struct ldlm_lock *lock,
1799                              int mode)
1800 {
1801         cfs_init_completion(&blwi->blwi_comp);
1802         CFS_INIT_LIST_HEAD(&blwi->blwi_head);
1803
1804         if (cfs_memory_pressure_get())
1805                 blwi->blwi_mem_pressure = 1;
1806
1807         blwi->blwi_ns = ns;
1808         blwi->blwi_mode = mode;
1809         if (ld != NULL)
1810                 blwi->blwi_ld = *ld;
1811         if (count) {
1812                 cfs_list_add(&blwi->blwi_head, cancels);
1813                 cfs_list_del_init(cancels);
1814                 blwi->blwi_count = count;
1815         } else {
1816                 blwi->blwi_lock = lock;
1817         }
1818 }
1819
1820 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1821                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1822                              cfs_list_t *cancels, int count, int mode)
1823 {
1824         ENTRY;
1825
1826         if (cancels && count == 0)
1827                 RETURN(0);
1828
1829         if (mode == LDLM_SYNC) {
1830                 /* for a synchronous call do minimal memory allocation, as it
1831                  * could be triggered from a kernel shrinker
1832                  */
1833                 struct ldlm_bl_work_item blwi;
1834                 memset(&blwi, 0, sizeof(blwi));
1835                 init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
1836                 RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
1837         } else {
1838                 struct ldlm_bl_work_item *blwi;
1839                 OBD_ALLOC(blwi, sizeof(*blwi));
1840                 if (blwi == NULL)
1841                         RETURN(-ENOMEM);
1842                 init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
1843
1844                 RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
1845         }
1846 }
1847
1848 #endif
1849
1850 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1851                            struct ldlm_lock *lock)
1852 {
1853 #ifdef __KERNEL__
1854         RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
1855 #else
1856         RETURN(-ENOSYS);
1857 #endif
1858 }
1859
1860 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1861                            cfs_list_t *cancels, int count, int mode)
1862 {
1863 #ifdef __KERNEL__
1864         RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
1865 #else
1866         RETURN(-ENOSYS);
1867 #endif
1868 }
1869
1870 /* Set_info coming from the server (e.g. MDT) to the client (e.g. MDC)! */
1871 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
1872 {
1873         struct obd_device *obd = req->rq_export->exp_obd;
1874         char *key;
1875         void *val;
1876         int keylen, vallen;
1877         int rc = -ENOSYS;
1878         ENTRY;
1879
1880         DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
1881
1882         req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
1883
1884         key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1885         if (key == NULL) {
1886                 DEBUG_REQ(D_IOCTL, req, "no set_info key");
1887                 RETURN(-EFAULT);
1888         }
1889         keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
1890                                       RCL_CLIENT);
1891         val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1892         if (val == NULL) {
1893                 DEBUG_REQ(D_IOCTL, req, "no set_info val");
1894                 RETURN(-EFAULT);
1895         }
1896         vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
1897                                       RCL_CLIENT);
1898
1899         /* We are responsible for swabbing contents of val */
1900
1901         if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
1902                 /* Pass it on to mdc (the "export" in this case) */
1903                 rc = obd_set_info_async(req->rq_svc_thread->t_env,
1904                                         req->rq_export,
1905                                         sizeof(KEY_HSM_COPYTOOL_SEND),
1906                                         KEY_HSM_COPYTOOL_SEND,
1907                                         vallen, val, NULL);
1908         else
1909                 DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
1910
1911         return rc;
1912 }
1913
1914 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
1915                                         const char *msg, int rc,
1916                                         struct lustre_handle *handle)
1917 {
1918         DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
1919                   "%s: [nid %s] [rc %d] [lock "LPX64"]",
1920                   msg, libcfs_id2str(req->rq_peer), rc,
1921                   handle ? handle->cookie : 0);
1922         if (req->rq_no_reply)
1923                 CWARN("No reply was sent, maybe cause bug 21636.\n");
1924         else if (rc)
1925                 CWARN("Send reply failed, maybe cause bug 21636.\n");
1926 }
1927
1928 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
1929 static int ldlm_callback_handler(struct ptlrpc_request *req)
1930 {
1931         struct ldlm_namespace *ns;
1932         struct ldlm_request *dlm_req;
1933         struct ldlm_lock *lock;
1934         int rc;
1935         ENTRY;
1936
1937         /* Requests arrive in sender's byte order.  The ptlrpc service
1938          * handler has already checked and, if necessary, byte-swapped the
1939          * incoming request message body, but I am responsible for the
1940          * message buffers. */
1941
1942         /* do nothing for sec context finalize */
1943         if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
1944                 RETURN(0);
1945
1946         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
1947
1948         if (req->rq_export == NULL) {
1949                 rc = ldlm_callback_reply(req, -ENOTCONN);
1950                 ldlm_callback_errmsg(req, "Operate on unconnected server",
1951                                      rc, NULL);
1952                 RETURN(0);
1953         }
1954
1955         LASSERT(req->rq_export != NULL);
1956         LASSERT(req->rq_export->exp_obd != NULL);
1957
1958         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1959         case LDLM_BL_CALLBACK:
1960                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
1961                         RETURN(0);
1962                 break;
1963         case LDLM_CP_CALLBACK:
1964                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
1965                         RETURN(0);
1966                 break;
1967         case LDLM_GL_CALLBACK:
1968                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
1969                         RETURN(0);
1970                 break;
1971         case LDLM_SET_INFO:
1972                 rc = ldlm_handle_setinfo(req);
1973                 ldlm_callback_reply(req, rc);
1974                 RETURN(0);
1975         case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1976                 CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
1977                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
1978                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
1979                         RETURN(0);
1980                 rc = llog_origin_handle_cancel(req);
1981                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
1982                         RETURN(0);
1983                 ldlm_callback_reply(req, rc);
1984                 RETURN(0);
1985         case OBD_QC_CALLBACK:
1986                 req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
1987                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
1988                         RETURN(0);
1989                 rc = target_handle_qc_callback(req);
1990                 ldlm_callback_reply(req, rc);
1991                 RETURN(0);
1992         case QUOTA_DQACQ:
1993         case QUOTA_DQREL:
1994                 /* reply in handler */
1995                 req_capsule_set(&req->rq_pill, &RQF_MDS_QUOTA_DQACQ);
1996                 rc = target_handle_dqacq_callback(req);
1997                 RETURN(0);
1998         case LLOG_ORIGIN_HANDLE_CREATE:
1999                 req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2000                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2001                         RETURN(0);
2002                 rc = llog_origin_handle_open(req);
2003                 ldlm_callback_reply(req, rc);
2004                 RETURN(0);
2005         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2006                 req_capsule_set(&req->rq_pill,
2007                                 &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2008                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2009                         RETURN(0);
2010                 rc = llog_origin_handle_next_block(req);
2011                 ldlm_callback_reply(req, rc);
2012                 RETURN(0);
2013         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2014                 req_capsule_set(&req->rq_pill,
2015                                 &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2016                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2017                         RETURN(0);
2018                 rc = llog_origin_handle_read_header(req);
2019                 ldlm_callback_reply(req, rc);
2020                 RETURN(0);
2021         case LLOG_ORIGIN_HANDLE_CLOSE:
2022                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
2023                         RETURN(0);
2024                 rc = llog_origin_handle_close(req);
2025                 ldlm_callback_reply(req, rc);
2026                 RETURN(0);
2027         default:
2028                 CERROR("unknown opcode %u\n",
2029                        lustre_msg_get_opc(req->rq_reqmsg));
2030                 ldlm_callback_reply(req, -EPROTO);
2031                 RETURN(0);
2032         }
2033
2034         ns = req->rq_export->exp_obd->obd_namespace;
2035         LASSERT(ns != NULL);
2036
2037         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2038
2039         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2040         if (dlm_req == NULL) {
2041                 rc = ldlm_callback_reply(req, -EPROTO);
2042                 ldlm_callback_errmsg(req, "Operate without parameter", rc,
2043                                      NULL);
2044                 RETURN(0);
2045         }
2046
2047         /* Force a known safe race: send a cancel to the server for a lock
2048          * on which the server has already started a blocking callback. */
2049         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
2050             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2051                 rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
2052                 if (rc < 0)
2053                         CERROR("ldlm_cli_cancel: %d\n", rc);
2054         }
2055
2056         lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
2057         if (!lock) {
2058                 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
2059                        "disappeared\n", dlm_req->lock_handle[0].cookie);
2060                 rc = ldlm_callback_reply(req, -EINVAL);
2061                 ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
2062                                      &dlm_req->lock_handle[0]);
2063                 RETURN(0);
2064         }
2065
2066         if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
2067             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
2068                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
2069
2070         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
2071         lock_res_and_lock(lock);
2072         lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
2073         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
2074                 /* If somebody cancels the lock and the cache is already
2075                  * dropped, or the lock failed before the cp_ast reached the
2076                  * client, we can tell the server we have no lock. Otherwise,
2077                  * we should send the cancel after dropping the cache. */
2078                 if (((lock->l_flags & LDLM_FL_CANCELING) &&
2079                     (lock->l_flags & LDLM_FL_BL_DONE)) ||
2080                     (lock->l_flags & LDLM_FL_FAILED)) {
2081                         LDLM_DEBUG(lock, "callback on lock "
2082                                    LPX64" - lock disappeared\n",
2083                                    dlm_req->lock_handle[0].cookie);
2084                         unlock_res_and_lock(lock);
2085                         LDLM_LOCK_RELEASE(lock);
2086                         rc = ldlm_callback_reply(req, -EINVAL);
2087                         ldlm_callback_errmsg(req, "Operate on stale lock", rc,
2088                                              &dlm_req->lock_handle[0]);
2089                         RETURN(0);
2090                 }
2091                 /* BL_AST locks are not needed in the LRU.
2092                  * Let ldlm_cancel_lru() be fast. */
2093                 ldlm_lock_remove_from_lru(lock);
2094                 lock->l_flags |= LDLM_FL_BL_AST;
2095         }
2096         unlock_res_and_lock(lock);
2097
2098         /* We want the ost thread to get this reply so that it can respond
2099          * to ost requests (write cache writeback) that might be triggered
2100          * in the callback.
2101          *
2102          * But we'd also like to be able to indicate in the reply that we're
2103          * cancelling right now, because it's unused, or have an intent result
2104          * in the reply, so we might have to push the responsibility for sending
2105          * the reply down into the AST handlers, alas. */
2106
2107         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2108         case LDLM_BL_CALLBACK:
2109                 CDEBUG(D_INODE, "blocking ast\n");
2110                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
2111                 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
2112                         rc = ldlm_callback_reply(req, 0);
2113                         if (req->rq_no_reply || rc)
2114                                 ldlm_callback_errmsg(req, "Normal process", rc,
2115                                                      &dlm_req->lock_handle[0]);
2116                 }
2117                 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
2118                         ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
2119                 break;
2120         case LDLM_CP_CALLBACK:
2121                 CDEBUG(D_INODE, "completion ast\n");
2122                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
2123                 ldlm_callback_reply(req, 0);
2124                 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
2125                 break;
2126         case LDLM_GL_CALLBACK:
2127                 CDEBUG(D_INODE, "glimpse ast\n");
2128                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
2129                 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
2130                 break;
2131         default:
2132                 LBUG();                         /* checked above */
2133         }
2134
2135         RETURN(0);
2136 }
2137
2138 #ifdef HAVE_SERVER_SUPPORT
2139 static int ldlm_cancel_handler(struct ptlrpc_request *req)
2140 {
2141         int rc;
2142         ENTRY;
2143
2144         /* Requests arrive in sender's byte order.  The ptlrpc service
2145          * handler has already checked and, if necessary, byte-swapped the
2146          * incoming request message body, but I am responsible for the
2147          * message buffers. */
2148
2149         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2150
2151         if (req->rq_export == NULL) {
2152                 struct ldlm_request *dlm_req;
2153
2154                 CERROR("%s from %s arrived at %lu with bad export cookie "
2155                        LPU64"\n",
2156                        ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
2157                        libcfs_nid2str(req->rq_peer.nid),
2158                        req->rq_arrival_time.tv_sec,
2159                        lustre_msg_get_handle(req->rq_reqmsg)->cookie);
2160
2161                 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
2162                         req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2163                         dlm_req = req_capsule_client_get(&req->rq_pill,
2164                                                          &RMF_DLM_REQ);
2165                         if (dlm_req != NULL)
2166                                 ldlm_lock_dump_handle(D_ERROR,
2167                                                       &dlm_req->lock_handle[0]);
2168                 }
2169                 ldlm_callback_reply(req, -ENOTCONN);
2170                 RETURN(0);
2171         }
2172
2173         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2174
2175         /* XXX FIXME move this back to mds/handler.c, bug 249 */
2176         case LDLM_CANCEL:
2177                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2178                 CDEBUG(D_INODE, "cancel\n");
2179                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
2180                         RETURN(0);
2181                 rc = ldlm_handle_cancel(req);
2182                 if (rc)
2183                         break;
2184                 RETURN(0);
2185         case OBD_LOG_CANCEL:
2186                 req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
2187                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
2188                         RETURN(0);
2189                 rc = llog_origin_handle_cancel(req);
2190                 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
2191                         RETURN(0);
2192                 ldlm_callback_reply(req, rc);
2193                 RETURN(0);
2194         default:
2195                 CERROR("invalid opcode %d\n",
2196                        lustre_msg_get_opc(req->rq_reqmsg));
2197                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
2198                 ldlm_callback_reply(req, -EINVAL);
2199         }
2200
2201         RETURN(0);
2202 }
2203
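/* High-priority request support for cancels: match a cancel RPC against a
 * given lock (so the cancel can be prioritised) and flag cancels of locks
 * that have already been sent a blocking AST. */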
2204 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2205                                         struct ldlm_lock *lock)
2206 {
2207         struct ldlm_request *dlm_req;
2208         struct lustre_handle lockh;
2209         int rc = 0;
2210         int i;
2211         ENTRY;
2212
2213         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2214         if (dlm_req == NULL)
2215                 RETURN(0);
2216
2217         ldlm_lock2handle(lock, &lockh);
2218         for (i = 0; i < dlm_req->lock_count; i++) {
2219                 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2220                                         &lockh)) {
2221                         DEBUG_REQ(D_RPCTRACE, req,
2222                                   "Prio raised by lock "LPX64".", lockh.cookie);
2223
2224                         rc = 1;
2225                         break;
2226                 }
2227         }
2228
2229         RETURN(rc);
2230
2231 }
2232
2233 static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
2234 {
2235         struct ldlm_request *dlm_req;
2236         int rc = 0;
2237         int i;
2238         ENTRY;
2239
2240         /* no prolonging during recovery */
2241         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
2242                 RETURN(0);
2243
2244         dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2245         if (dlm_req == NULL)
2246                 RETURN(-EFAULT);
2247
2248         for (i = 0; i < dlm_req->lock_count; i++) {
2249                 struct ldlm_lock *lock;
2250
2251                 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
2252                 if (lock == NULL)
2253                         continue;
2254
2255                 rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
2256                 if (rc)
2257                         LDLM_DEBUG(lock, "hpreq cancel lock");
2258                 LDLM_LOCK_PUT(lock);
2259
2260                 if (rc)
2261                         break;
2262         }
2263
2264         RETURN(rc);
2265 }
2266
2267 static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
2268         .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
2269         .hpreq_check      = ldlm_cancel_hpreq_check,
2270         .hpreq_fini       = NULL,
2271 };
2272
2273 static int ldlm_hpreq_handler(struct ptlrpc_request *req)
2274 {
2275         ENTRY;
2276
2277         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2278
2279         if (req->rq_export == NULL)
2280                 RETURN(0);
2281
2282         if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
2283                 req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
2284                 req->rq_ops = &ldlm_cancel_hpreq_ops;
2285         }
2286         RETURN(0);
2287 }
2288
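/* cfs_hash iterator used by ldlm_revoke_export_locks(): queue every granted
 * PLAIN/IBITS lock of the export that has not yet been sent a blocking AST
 * onto the rpc_list passed in as data. */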
2289 int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
2290                         cfs_hlist_node_t *hnode, void *data)
2291
2292 {
2293         cfs_list_t         *rpc_list = data;
2294         struct ldlm_lock   *lock = cfs_hash_object(hs, hnode);
2295
2296         lock_res_and_lock(lock);
2297
2298         if (lock->l_req_mode != lock->l_granted_mode) {
2299                 unlock_res_and_lock(lock);
2300                 return 0;
2301         }
2302
2303         LASSERT(lock->l_resource);
2304         if (lock->l_resource->lr_type != LDLM_IBITS &&
2305             lock->l_resource->lr_type != LDLM_PLAIN) {
2306                 unlock_res_and_lock(lock);
2307                 return 0;
2308         }
2309
2310         if (lock->l_flags & LDLM_FL_AST_SENT) {
2311                 unlock_res_and_lock(lock);
2312                 return 0;
2313         }
2314
2315         LASSERT(lock->l_blocking_ast);
2316         LASSERT(!lock->l_blocking_lock);
2317
2318         lock->l_flags |= LDLM_FL_AST_SENT;
2319         if (lock->l_export && lock->l_export->exp_lock_hash) {
2320                 /* NB: it's safe to call cfs_hash_del() even if the lock
2321                  * isn't in exp_lock_hash. */
2322                 /* In the function below, .hs_keycmp resolves to
2323                  * ldlm_export_lock_keycmp() */
2324                 /* coverity[overrun-buffer-val] */
2325                 cfs_hash_del(lock->l_export->exp_lock_hash,
2326                              &lock->l_remote_handle, &lock->l_exp_hash);
2327         }
2328
2329         cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
2330         LDLM_LOCK_GET(lock);
2331
2332         unlock_res_and_lock(lock);
2333         return 0;
2334 }
2335
2336 void ldlm_revoke_export_locks(struct obd_export *exp)
2337 {
2338         cfs_list_t  rpc_list;
2339         ENTRY;
2340
2341         CFS_INIT_LIST_HEAD(&rpc_list);
2342         cfs_hash_for_each_empty(exp->exp_lock_hash,
2343                                 ldlm_revoke_lock_cb, &rpc_list);
2344         ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
2345                           LDLM_WORK_REVOKE_AST);
2346
2347         EXIT;
2348 }
2349 #endif /* HAVE_SERVER_SUPPORT */
2350
2351 #ifdef __KERNEL__
2352 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
2353 {
2354         struct ldlm_bl_work_item *blwi = NULL;
2355         static unsigned int num_bl = 0;
2356
2357         cfs_spin_lock(&blp->blp_lock);
2358         /* process a request from blp_list at least once every blp_num_threads iterations */
2359         if (!cfs_list_empty(&blp->blp_list) &&
2360             (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
2361                 blwi = cfs_list_entry(blp->blp_list.next,
2362                                       struct ldlm_bl_work_item, blwi_entry);
2363         else
2364                 if (!cfs_list_empty(&blp->blp_prio_list))
2365                         blwi = cfs_list_entry(blp->blp_prio_list.next,
2366                                               struct ldlm_bl_work_item,
2367                                               blwi_entry);
2368
2369         if (blwi) {
2370                 if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
2371                         num_bl = 0;
2372                 cfs_list_del(&blwi->blwi_entry);
2373         }
2374         cfs_spin_unlock(&blp->blp_lock);
2375
2376         return blwi;
2377 }
2378
2379 /* This only contains temporary data until the thread starts */
2380 struct ldlm_bl_thread_data {
2381         char                    bltd_name[CFS_CURPROC_COMM_MAX];
2382         struct ldlm_bl_pool     *bltd_blp;
2383         cfs_completion_t        bltd_comp;
2384         int                     bltd_num;
2385 };
2386
2387 static int ldlm_bl_thread_main(void *arg);
2388
2389 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
2390 {
2391         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
2392         int rc;
2393
2394         cfs_init_completion(&bltd.bltd_comp);
2395         rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
2396         if (rc < 0) {
2397                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
2398                        cfs_atomic_read(&blp->blp_num_threads), rc);
2399                 return rc;
2400         }
2401         cfs_wait_for_completion(&bltd.bltd_comp);
2402
2403         return 0;
2404 }
2405
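/* Main loop of a blocking-callback ("ldlm_bl") thread: pull work items off
 * the pool lists, start an extra thread when all existing ones are busy,
 * and either cancel the batched list of locks or run the single blocking
 * callback carried by the work item.  A work item with a NULL namespace,
 * queued by ldlm_cleanup(), tells the thread to exit. */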
2406 static int ldlm_bl_thread_main(void *arg)
2407 {
2408         struct ldlm_bl_pool *blp;
2409         ENTRY;
2410
2411         {
2412                 struct ldlm_bl_thread_data *bltd = arg;
2413
2414                 blp = bltd->bltd_blp;
2415
2416                 bltd->bltd_num =
2417                         cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
2418                 cfs_atomic_inc(&blp->blp_busy_threads);
2419
2420                 snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
2421                         "ldlm_bl_%02d", bltd->bltd_num);
2422                 cfs_daemonize(bltd->bltd_name);
2423
2424                 cfs_complete(&bltd->bltd_comp);
2425                 /* cannot use bltd after this, it is only on caller's stack */
2426         }
2427
2428         while (1) {
2429                 struct l_wait_info lwi = { 0 };
2430                 struct ldlm_bl_work_item *blwi = NULL;
2431                 int busy;
2432
2433                 blwi = ldlm_bl_get_work(blp);
2434
2435                 if (blwi == NULL) {
2436                         cfs_atomic_dec(&blp->blp_busy_threads);
2437                         l_wait_event_exclusive(blp->blp_waitq,
2438                                          (blwi = ldlm_bl_get_work(blp)) != NULL,
2439                                          &lwi);
2440                         busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
2441                 } else {
2442                         busy = cfs_atomic_read(&blp->blp_busy_threads);
2443                 }
2444
2445                 if (blwi->blwi_ns == NULL)
2446                         /* added by ldlm_cleanup() */
2447                         break;
2448
2449                 /* Not fatal if this races and we end up with a few too many threads */
2450                 if (unlikely(busy < blp->blp_max_threads &&
2451                              busy >= cfs_atomic_read(&blp->blp_num_threads) &&
2452                              !blwi->blwi_mem_pressure))
2453                         /* discard the return value, we tried */
2454                         ldlm_bl_thread_start(blp);
2455
2456                 if (blwi->blwi_mem_pressure)
2457                         cfs_memory_pressure_set();
2458
2459                 if (blwi->blwi_count) {
2460                         int count;
2461                         /* This is the special case where we cancel LRU locks
2462                          * asynchronously: the list of locks is passed in here.
2463                          * The locks are marked LDLM_FL_CANCELING but are NOT
2464                          * cancelled locally yet. */
2465                         count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
2466                                                            blwi->blwi_count,
2467                                                            LCF_BL_AST);
2468                         ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
2469                 } else {
2470                         ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
2471                                                 blwi->blwi_lock);
2472                 }
2473                 if (blwi->blwi_mem_pressure)
2474                         cfs_memory_pressure_clr();
2475
2476                 if (blwi->blwi_mode == LDLM_ASYNC)
2477                         OBD_FREE(blwi, sizeof(*blwi));
2478                 else
2479                         cfs_complete(&blwi->blwi_comp);
2480         }
2481
2482         cfs_atomic_dec(&blp->blp_busy_threads);
2483         cfs_atomic_dec(&blp->blp_num_threads);
2484         cfs_complete(&blp->blp_comp);
2485         RETURN(0);
2486 }
2487
2488 #endif
2489
2490 static int ldlm_setup(void);
2491 static int ldlm_cleanup(void);
2492
2493 int ldlm_get_ref(void)
2494 {
2495         int rc = 0;
2496         ENTRY;
2497         cfs_mutex_lock(&ldlm_ref_mutex);
2498         if (++ldlm_refcount == 1) {
2499                 rc = ldlm_setup();
2500                 if (rc)
2501                         ldlm_refcount--;
2502         }
2503         cfs_mutex_unlock(&ldlm_ref_mutex);
2504
2505         RETURN(rc);
2506 }
2507
2508 void ldlm_put_ref(void)
2509 {
2510         ENTRY;
2511         cfs_mutex_lock(&ldlm_ref_mutex);
2512         if (ldlm_refcount == 1) {
2513                 int rc = ldlm_cleanup();
2514                 if (rc)
2515                         CERROR("ldlm_cleanup failed: %d\n", rc);
2516                 else
2517                         ldlm_refcount--;
2518         } else {
2519                 ldlm_refcount--;
2520         }
2521         cfs_mutex_unlock(&ldlm_ref_mutex);
2522
2523         EXIT;
2524 }
2525
2526 /*
2527  * Export handle<->lock hash operations.
2528  */
2529 static unsigned
2530 ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
2531 {
2532         return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
2533 }
2534
2535 static void *
2536 ldlm_export_lock_key(cfs_hlist_node_t *hnode)
2537 {
2538         struct ldlm_lock *lock;
2539
2540         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2541         return &lock->l_remote_handle;
2542 }
2543
2544 static void
2545 ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
2546 {
2547         struct ldlm_lock     *lock;
2548
2549         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2550         lock->l_remote_handle = *(struct lustre_handle *)key;
2551 }
2552
2553 static int
2554 ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
2555 {
2556         return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
2557 }
2558
2559 static void *
2560 ldlm_export_lock_object(cfs_hlist_node_t *hnode)
2561 {
2562         return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2563 }
2564
2565 static void
2566 ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2567 {
2568         struct ldlm_lock *lock;
2569
2570         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2571         LDLM_LOCK_GET(lock);
2572 }
2573
2574 static void
2575 ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
2576 {
2577         struct ldlm_lock *lock;
2578
2579         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
2580         LDLM_LOCK_RELEASE(lock);
2581 }
2582
2583 static cfs_hash_ops_t ldlm_export_lock_ops = {
2584         .hs_hash        = ldlm_export_lock_hash,
2585         .hs_key         = ldlm_export_lock_key,
2586         .hs_keycmp      = ldlm_export_lock_keycmp,
2587         .hs_keycpy      = ldlm_export_lock_keycpy,
2588         .hs_object      = ldlm_export_lock_object,
2589         .hs_get         = ldlm_export_lock_get,
2590         .hs_put         = ldlm_export_lock_put,
2591         .hs_put_locked  = ldlm_export_lock_put,
2592 };
2593
2594 int ldlm_init_export(struct obd_export *exp)
2595 {
2596         ENTRY;
2597
2598         exp->exp_lock_hash =
2599                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
2600                                 HASH_EXP_LOCK_CUR_BITS,
2601                                 HASH_EXP_LOCK_MAX_BITS,
2602                                 HASH_EXP_LOCK_BKT_BITS, 0,
2603                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
2604                                 &ldlm_export_lock_ops,
2605                                 CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
2606                                 CFS_HASH_NBLK_CHANGE);
2607
2608         if (!exp->exp_lock_hash)
2609                 RETURN(-ENOMEM);
2610
2611         RETURN(0);
2612 }
2613 EXPORT_SYMBOL(ldlm_init_export);
2614
2615 void ldlm_destroy_export(struct obd_export *exp)
2616 {
2617         ENTRY;
2618         cfs_hash_putref(exp->exp_lock_hash);
2619         exp->exp_lock_hash = NULL;
2620
2621         ldlm_destroy_flock_export(exp);
2622         EXIT;
2623 }
2624 EXPORT_SYMBOL(ldlm_destroy_export);
2625
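/* Bring up the global LDLM state: register the "ldlm_cbd" callback service
 * (and, with server support, the "ldlm_canceld" cancel service), set up the
 * blocking-callback thread pool and start its initial threads. */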
2626 static int ldlm_setup(void)
2627 {
2628         static struct ptlrpc_service_conf       conf;
2629         struct ldlm_bl_pool                     *blp = NULL;
2630         int rc = 0;
2631 #ifdef __KERNEL__
2632         int i;
2633 #endif
2634         ENTRY;
2635
2636         if (ldlm_state != NULL)
2637                 RETURN(-EALREADY);
2638
2639         OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
2640         if (ldlm_state == NULL)
2641                 RETURN(-ENOMEM);
2642
2643 #ifdef LPROCFS
2644         rc = ldlm_proc_setup();
2645         if (rc != 0)
2646                 GOTO(out, rc);
2647 #endif
2648
2649         memset(&conf, 0, sizeof(conf));
2650         conf = (typeof(conf)) {
2651                 .psc_name               = "ldlm_cbd",
2652                 .psc_watchdog_factor    = 2,
2653                 .psc_buf                = {
2654                         .bc_nbufs               = LDLM_NBUFS,
2655                         .bc_buf_size            = LDLM_BUFSIZE,
2656                         .bc_req_max_size        = LDLM_MAXREQSIZE,
2657                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
2658                         .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
2659                         .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
2660                 },
2661                 .psc_thr                = {
2662                         .tc_thr_name            = "ldlm_cb",
2663                         .tc_thr_factor          = LDLM_THR_FACTOR,
2664                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
2665                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
2666                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
2667                         .tc_nthrs_user          = ldlm_num_threads,
2668                         .tc_cpu_affinity        = 1,
2669                         .tc_ctx_tags            = LCT_MD_THREAD | LCT_DT_THREAD,
2670                 },
2671                 .psc_cpt                = {
2672                         .cc_pattern             = ldlm_cpts,
2673                 },
2674                 .psc_ops                = {
2675                         .so_req_handler         = ldlm_callback_handler,
2676                 },
2677         };
2678         ldlm_state->ldlm_cb_service = \
2679                         ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
2680         if (IS_ERR(ldlm_state->ldlm_cb_service)) {
2681                 CERROR("failed to start service\n");
2682                 rc = PTR_ERR(ldlm_state->ldlm_cb_service);
2683                 ldlm_state->ldlm_cb_service = NULL;
2684                 GOTO(out, rc);
2685         }
2686
2687 #ifdef HAVE_SERVER_SUPPORT
2688         memset(&conf, 0, sizeof(conf));
2689         conf = (typeof(conf)) {
2690                 .psc_name               = "ldlm_canceld",
2691                 .psc_watchdog_factor    = 6,
2692                 .psc_buf                = {
2693                         .bc_nbufs               = LDLM_NBUFS,
2694                         .bc_buf_size            = LDLM_BUFSIZE,
2695                         .bc_req_max_size        = LDLM_MAXREQSIZE,
2696                         .bc_rep_max_size        = LDLM_MAXREPSIZE,
2697                         .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
2698                         .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,
2699
2700                 },
2701                 .psc_thr                = {
2702                         .tc_thr_name            = "ldlm_cn",
2703                         .tc_thr_factor          = LDLM_THR_FACTOR,
2704                         .tc_nthrs_init          = LDLM_NTHRS_INIT,
2705                         .tc_nthrs_base          = LDLM_NTHRS_BASE,
2706                         .tc_nthrs_max           = LDLM_NTHRS_MAX,
2707                         .tc_nthrs_user          = ldlm_num_threads,
2708                         .tc_cpu_affinity        = 1,
2709                         .tc_ctx_tags            = LCT_MD_THREAD |
2710                                                   LCT_DT_THREAD |
2711                                                   LCT_CL_THREAD,
2712                 },
2713                 .psc_cpt                = {
2714                         .cc_pattern             = ldlm_cpts,
2715                 },
2716                 .psc_ops                = {
2717                         .so_req_handler         = ldlm_cancel_handler,
2718                         .so_hpreq_handler       = ldlm_hpreq_handler,
2719                 },
2720         };
2721         ldlm_state->ldlm_cancel_service =
2722                         ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
2723         if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
2724                 CERROR("failed to start ldlm_canceld service\n");
2725                 rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
2726                 ldlm_state->ldlm_cancel_service = NULL;
2727                 GOTO(out, rc);
2728         }
2729 #endif
2730
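        /* Create the blocking AST thread pool.  Incoming blocking and cancel
         * callbacks are queued on this pool and processed asynchronously by
         * the ldlm_bl_* worker threads started below. */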
2731         OBD_ALLOC(blp, sizeof(*blp));
2732         if (blp == NULL)
2733                 GOTO(out, rc = -ENOMEM);
2734         ldlm_state->ldlm_bl_pool = blp;
2735
2736         cfs_spin_lock_init(&blp->blp_lock);
2737         CFS_INIT_LIST_HEAD(&blp->blp_list);
2738         CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
2739         cfs_waitq_init(&blp->blp_waitq);
2740         cfs_atomic_set(&blp->blp_num_threads, 0);
2741         cfs_atomic_set(&blp->blp_busy_threads, 0);
2742
2743 #ifdef __KERNEL__
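        /* Size the blocking thread pool: without an ldlm_num_threads
         * override, start LDLM_NTHRS_INIT workers and allow growth up to
         * LDLM_NTHRS_MAX; otherwise run a fixed count clamped to that
         * range. */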
2744         if (ldlm_num_threads == 0) {
2745                 blp->blp_min_threads = LDLM_NTHRS_INIT;
2746                 blp->blp_max_threads = LDLM_NTHRS_MAX;
2747         } else {
2748                 blp->blp_min_threads = blp->blp_max_threads =
2749                         min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
2750                                                          ldlm_num_threads));
2751         }
2752
2753         for (i = 0; i < blp->blp_min_threads; i++) {
2754                 rc = ldlm_bl_thread_start(blp);
2755                 if (rc < 0)
2756                         GOTO(out, rc);
2757         }
2758
2759 # ifdef HAVE_SERVER_SUPPORT
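        /* Server side: initialize the waiting-lock machinery.  Granted locks
         * with outstanding blocking ASTs sit on waiting_locks_list; when the
         * timer fires, waiting_locks_callback() hands timed-out locks to the
         * expired_lock_main() thread, which evicts the unresponsive client. */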
2760         CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
2761         expired_lock_thread.elt_state = ELT_STOPPED;
2762         cfs_waitq_init(&expired_lock_thread.elt_waitq);
2763
2764         CFS_INIT_LIST_HEAD(&waiting_locks_list);
2765         cfs_spin_lock_init(&waiting_locks_spinlock);
2766         cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
2767
2768         rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
2769         if (rc < 0) {
2770                 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
2771                 GOTO(out, rc);
2772         }
2773
2774         cfs_wait_event(expired_lock_thread.elt_waitq,
2775                        expired_lock_thread.elt_state == ELT_READY);
2776 # endif /* HAVE_SERVER_SUPPORT */
2777
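        /* Start the LDLM pools framework, which periodically recalculates
         * lock volume limits on the server and trims client lock LRUs. */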
2778         rc = ldlm_pools_init();
2779         if (rc) {
2780                 CERROR("Failed to initialize LDLM pools: %d\n", rc);
2781                 GOTO(out, rc);
2782         }
2783 #endif
2784         RETURN(0);
2785
2786  out:
2787         ldlm_cleanup();
2788         RETURN(rc);
2789 }
2790
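/*
 * Undo ldlm_setup() in roughly reverse order.  Fails with -EBUSY if any
 * namespaces still exist; otherwise stops the pools and blocking threads,
 * unregisters the PTLRPC services, removes the /proc entries, terminates
 * the expired-lock thread and frees ldlm_state.
 */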
2791 static int ldlm_cleanup(void)
2792 {
2793         ENTRY;
2794
2795         if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
2796             !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
2797                 CERROR("ldlm still has namespaces; clean these up first.\n");
2798                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
2799                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
2800                 RETURN(-EBUSY);
2801         }
2802
2803 #ifdef __KERNEL__
2804         ldlm_pools_fini();
2805
2806         if (ldlm_state->ldlm_bl_pool != NULL) {
2807                 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
2808
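                /* Stop the blocking threads one at a time: a work item
                 * queued with blwi_ns == NULL tells a thread to exit, and it
                 * signals blp_comp on its way out. */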
2809                 while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
2810                         struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
2811
2812                         cfs_init_completion(&blp->blp_comp);
2813
2814                         cfs_spin_lock(&blp->blp_lock);
2815                         cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
2816                         cfs_waitq_signal(&blp->blp_waitq);
2817                         cfs_spin_unlock(&blp->blp_lock);
2818
2819                         cfs_wait_for_completion(&blp->blp_comp);
2820                 }
2821
2822                 OBD_FREE(blp, sizeof(*blp));
2823         }
2824 #endif /* __KERNEL__ */
2825
2826         if (ldlm_state->ldlm_cb_service != NULL)
2827                 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2828 #ifdef HAVE_SERVER_SUPPORT
2829         if (ldlm_state->ldlm_cancel_service != NULL)
2830                 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2831 #endif
2832
2833 #ifdef __KERNEL__
2834         ldlm_proc_cleanup();
2835
2836 # ifdef HAVE_SERVER_SUPPORT
2837         if (expired_lock_thread.elt_state != ELT_STOPPED) {
2838                 expired_lock_thread.elt_state = ELT_TERMINATE;
2839                 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
2840                 cfs_wait_event(expired_lock_thread.elt_waitq,
2841                                expired_lock_thread.elt_state == ELT_STOPPED);
2842         }
2843 # endif
2844 #endif /* __KERNEL__ */
2845
2846         OBD_FREE(ldlm_state, sizeof(*ldlm_state));
2847         ldlm_state = NULL;
2848
2849         RETURN(0);
2850 }
2851
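/*
 * Module initialization: set up the global mutexes and create the slab
 * caches for resources, locks and interval nodes.  The lock slab uses
 * SLAB_DESTROY_BY_RCU so lock memory is reclaimed RCU-safely (see the
 * matching synchronize_rcu() in ldlm_exit()).
 */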
2852 int ldlm_init(void)
2853 {
2854         cfs_mutex_init(&ldlm_ref_mutex);
2855         cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
2856         cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
2857         ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
2858                                                sizeof(struct ldlm_resource), 0,
2859                                                CFS_SLAB_HWCACHE_ALIGN);
2860         if (ldlm_resource_slab == NULL)
2861                 return -ENOMEM;
2862
2863         ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
2864                               sizeof(struct ldlm_lock), 0,
2865                               CFS_SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
2866         if (ldlm_lock_slab == NULL) {
2867                 cfs_mem_cache_destroy(ldlm_resource_slab);
2868                 return -ENOMEM;
2869         }
2870
2871         ldlm_interval_slab = cfs_mem_cache_create("interval_node",
2872                                         sizeof(struct ldlm_interval),
2873                                         0, CFS_SLAB_HWCACHE_ALIGN);
2874         if (ldlm_interval_slab == NULL) {
2875                 cfs_mem_cache_destroy(ldlm_resource_slab);
2876                 cfs_mem_cache_destroy(ldlm_lock_slab);
2877                 return -ENOMEM;
2878         }
2879 #if LUSTRE_TRACKS_LOCK_EXP_REFS
2880         class_export_dump_hook = ldlm_dump_export_locks;
2881 #endif
2882         return 0;
2883 }
2884
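/*
 * Module cleanup: warn if references are still held, then destroy the slab
 * caches created by ldlm_init().
 */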
2885 void ldlm_exit(void)
2886 {
2887         int rc;
2888         if (ldlm_refcount)
2889                 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
2890         rc = cfs_mem_cache_destroy(ldlm_resource_slab);
2891         LASSERTF(rc == 0, "couldn't free ldlm resource slab: %d\n", rc);
2892 #ifdef __KERNEL__
2893         /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so call
2894          * synchronize_rcu() here to wait for a grace period to elapse and
2895          * give ldlm_lock_free() a chance to run. */
2896         synchronize_rcu();
2897 #endif
2898         rc = cfs_mem_cache_destroy(ldlm_lock_slab);
2899         LASSERTF(rc == 0, "couldn't free ldlm lock slab: %d\n", rc);
2900         rc = cfs_mem_cache_destroy(ldlm_interval_slab);
2901         LASSERTF(rc == 0, "couldn't free interval node slab: %d\n", rc);
2902 }
2903
2904 /* ldlm_extent.c */
2905 EXPORT_SYMBOL(ldlm_extent_shift_kms);
2906
2907 /* ldlm_lock.c */
2908 #ifdef HAVE_SERVER_SUPPORT
2909 EXPORT_SYMBOL(ldlm_get_processing_policy);
2910 #endif
2911 EXPORT_SYMBOL(ldlm_lock2desc);
2912 EXPORT_SYMBOL(ldlm_register_intent);
2913 EXPORT_SYMBOL(ldlm_lockname);
2914 EXPORT_SYMBOL(ldlm_typename);
2915 EXPORT_SYMBOL(ldlm_lock2handle);
2916 EXPORT_SYMBOL(__ldlm_handle2lock);
2917 EXPORT_SYMBOL(ldlm_lock_get);
2918 EXPORT_SYMBOL(ldlm_lock_put);
2919 EXPORT_SYMBOL(ldlm_lock_match);
2920 EXPORT_SYMBOL(ldlm_lock_cancel);
2921 EXPORT_SYMBOL(ldlm_lock_addref);
2922 EXPORT_SYMBOL(ldlm_lock_addref_try);
2923 EXPORT_SYMBOL(ldlm_lock_decref);
2924 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
2925 EXPORT_SYMBOL(ldlm_lock_change_resource);
2926 EXPORT_SYMBOL(ldlm_it2str);
2927 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2928 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
2929 EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
2930 EXPORT_SYMBOL(ldlm_lock_allow_match);
2931 EXPORT_SYMBOL(ldlm_lock_downgrade);
2932 EXPORT_SYMBOL(ldlm_lock_convert);
2933
2934 /* ldlm_request.c */
2935 EXPORT_SYMBOL(ldlm_completion_ast_async);
2936 EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
2937 EXPORT_SYMBOL(ldlm_completion_ast);
2938 EXPORT_SYMBOL(ldlm_blocking_ast);
2939 EXPORT_SYMBOL(ldlm_glimpse_ast);
2940 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2941 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2942 EXPORT_SYMBOL(ldlm_prep_elc_req);
2943 EXPORT_SYMBOL(ldlm_cli_convert);
2944 EXPORT_SYMBOL(ldlm_cli_enqueue);
2945 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2946 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2947 EXPORT_SYMBOL(ldlm_cli_cancel);
2948 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2949 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2950 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2951 EXPORT_SYMBOL(ldlm_replay_locks);
2952 EXPORT_SYMBOL(ldlm_resource_foreach);
2953 EXPORT_SYMBOL(ldlm_namespace_foreach);
2954 EXPORT_SYMBOL(ldlm_resource_iterate);
2955 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2956 EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
2957 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2958
2959 /* ldlm_lockd.c */
2960 #ifdef HAVE_SERVER_SUPPORT
2961 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2962 EXPORT_SYMBOL(ldlm_server_completion_ast);
2963 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2964 EXPORT_SYMBOL(ldlm_glimpse_locks);
2965 EXPORT_SYMBOL(ldlm_handle_enqueue);
2966 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2967 EXPORT_SYMBOL(ldlm_handle_cancel);
2968 EXPORT_SYMBOL(ldlm_request_cancel);
2969 EXPORT_SYMBOL(ldlm_handle_convert);
2970 EXPORT_SYMBOL(ldlm_handle_convert0);
2971 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2972 #endif
2973 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2974 EXPORT_SYMBOL(ldlm_get_ref);
2975 EXPORT_SYMBOL(ldlm_put_ref);
2976 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2977
2978 /* ldlm_resource.c */
2979 EXPORT_SYMBOL(ldlm_namespace_new);
2980 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2981 EXPORT_SYMBOL(ldlm_namespace_free);
2982 EXPORT_SYMBOL(ldlm_namespace_dump);
2983 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2984 EXPORT_SYMBOL(ldlm_resource_get);
2985 EXPORT_SYMBOL(ldlm_resource_putref);
2986 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
2987
2988 /* ldlm_lib.c */
2989 EXPORT_SYMBOL(client_import_add_conn);
2990 EXPORT_SYMBOL(client_import_del_conn);
2991 EXPORT_SYMBOL(client_obd_setup);
2992 EXPORT_SYMBOL(client_obd_cleanup);
2993 EXPORT_SYMBOL(client_connect_import);
2994 EXPORT_SYMBOL(client_disconnect_export);
2995 EXPORT_SYMBOL(target_send_reply);
2996 EXPORT_SYMBOL(target_pack_pool_reply);
2997
2998 #ifdef HAVE_SERVER_SUPPORT
2999 EXPORT_SYMBOL(server_disconnect_export);
3000 EXPORT_SYMBOL(target_stop_recovery_thread);
3001 EXPORT_SYMBOL(target_handle_connect);
3002 EXPORT_SYMBOL(target_cleanup_recovery);
3003 EXPORT_SYMBOL(target_destroy_export);
3004 EXPORT_SYMBOL(target_cancel_recovery_timer);
3005 EXPORT_SYMBOL(target_queue_recovery_request);
3006 EXPORT_SYMBOL(target_handle_ping);
3007 EXPORT_SYMBOL(target_handle_disconnect);
3008 #endif
3009
3010 /* l_lock.c */
3011 EXPORT_SYMBOL(lock_res_and_lock);
3012 EXPORT_SYMBOL(unlock_res_and_lock);