/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lockd.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_class.h>
#include <libcfs/list.h>
#include "ldlm_internal.h"
static int ldlm_num_threads;
CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
                "number of DLM service threads to start");

extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
static cfs_mutex_t      ldlm_ref_mutex;
static int ldlm_refcount;

struct ldlm_cb_async_args {
        struct ldlm_cb_set_arg *ca_set_arg;
        struct ldlm_lock       *ca_lock;
};

/* LDLM state */

static struct ldlm_state *ldlm_state;
inline cfs_time_t round_timeout(cfs_time_t timeout)
{
        return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}
/* timeout for initial callback (AST) reply (bz10399) */
static inline unsigned int ldlm_get_rq_timeout(void)
{
        /* Non-AT value */
        unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);

        return timeout < 1 ? 1 : timeout;
}
#ifdef __KERNEL__
/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
static cfs_list_t waiting_locks_list;
static cfs_timer_t waiting_locks_timer;

static struct expired_lock_thread {
        cfs_waitq_t             elt_waitq;
        int                     elt_state;
        int                     elt_dump;
        cfs_list_t              elt_expired_locks;
} expired_lock_thread;
#endif

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2
struct ldlm_bl_pool {
        cfs_spinlock_t          blp_lock;

        /*
         * blp_prio_list is used for callbacks that should be handled
         * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
         * see bug 13843
         */
        cfs_list_t              blp_prio_list;

        /*
         * blp_list is used for all other callbacks which are likely
         * to take longer to process.
         */
        cfs_list_t              blp_list;

        cfs_waitq_t             blp_waitq;
        cfs_completion_t        blp_comp;
        cfs_atomic_t            blp_num_threads;
        cfs_atomic_t            blp_busy_threads;
        int                     blp_min_threads;
        int                     blp_max_threads;
};
struct ldlm_bl_work_item {
        cfs_list_t              blwi_entry;
        struct ldlm_namespace  *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock       *blwi_lock;
        cfs_list_t              blwi_head;
        int                     blwi_count;
        cfs_completion_t        blwi_comp;
        int                     blwi_mode;
        int                     blwi_mem_pressure;
};
#ifdef __KERNEL__

static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        RETURN(need_to_run);
}
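
/*
 * Main loop of the "ldlm_elt" thread: sleep until waiting_locks_callback()
 * queues expired locks (or requests a debug dump), then fail the export of
 * every lock on the expired list, evicting the unresponsive clients.
 */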
static int expired_lock_main(void *arg)
{
        cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        int do_dump;
        ENTRY;

        cfs_daemonize("ldlm_elt");

        expired_lock_thread.elt_state = ELT_READY;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                cfs_spin_lock_bh(&waiting_locks_spinlock);
                if (expired_lock_thread.elt_dump) {
                        struct libcfs_debug_msg_data msgdata = {
                                .msg_file = __FILE__,
                                .msg_fn = "waiting_locks_callback",
                                .msg_line = expired_lock_thread.elt_dump };
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        /* from waiting_locks_callback, but not in timer */
                        libcfs_debug_dumplog();
                        libcfs_run_lbug_upcall(&msgdata);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                        expired_lock_thread.elt_dump = 0;
                }

                do_dump = 0;

                while (!cfs_list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = cfs_list_entry(expired->next, struct ldlm_lock,
                                              l_pending_chain);
                        if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                                CERROR("free lock on elt list %p\n", lock);
                                LBUG();
                        }
                        cfs_list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                /* release extra ref grabbed by
                                 * ldlm_add_waiting_lock() or
                                 * ldlm_failed_ast() */
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }
                        export = class_export_lock_get(lock->l_export, lock);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        do_dump++;
                        class_fail_export(export);
                        class_export_lock_put(export, lock);

                        /* release extra ref grabbed by ldlm_add_waiting_lock()
                         * or ldlm_failed_ast() */
                        LDLM_LOCK_RELEASE(lock);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                }
                cfs_spin_unlock_bh(&waiting_locks_spinlock);

                if (do_dump && obd_dump_on_eviction) {
                        CERROR("dump the log upon eviction\n");
                        libcfs_debug_dumplog();
                }

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        RETURN(0);
}
static int ldlm_add_waiting_lock(struct ldlm_lock *lock);

/**
 * Check if there is a request in the export request list
 * which prevents the lock canceling.
 */
static int ldlm_lock_busy(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        int match = 0;
        ENTRY;

        if (lock->l_export == NULL)
                return 0;

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                if (req->rq_ops->hpreq_lock_match) {
                        match = req->rq_ops->hpreq_lock_match(req, lock);
                        if (match)
                                break;
                }
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        RETURN(match);
}
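
/*
 * Timer handler for the waiting-locks list: scan for locks whose callback
 * timeout has passed.  Locks still covered by in-flight high-priority
 * requests get their timeout prolonged; everything else is moved to
 * expired_lock_thread, which performs the eviction in process context.
 */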
/* This is called from within a timer interrupt and cannot schedule */
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        while (!cfs_list_empty(&waiting_locks_list)) {
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                if (cfs_time_after(lock->l_callback_timeout,
                                   cfs_time_current()) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                if (ptlrpc_check_suspend()) {
                        /* there is a case when we talk to one mds, holding
                         * lock from another mds. this way we easily can get
                         * here, if second mds is being recovered. so, we
                         * suspend timeouts. bug 6019 */

                        LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        ldlm_add_waiting_lock(lock);
                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                        continue;
                }

                /* if timeout overlaps the activation time of suspended timeouts
                 * then extend it to give a chance for client to reconnect */
                if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
                                                 cfs_time_seconds(obd_timeout)/2),
                                    ptlrpc_suspend_wakeup_time())) {
                        LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        ldlm_add_waiting_lock(lock);
                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                        continue;
                }

                /* Check if we need to prolong timeout */
                if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
                    ldlm_lock_busy(lock)) {
                        int cont = 1;

                        if (lock->l_pending_chain.next == &waiting_locks_list)
                                cont = 0;

                        LDLM_LOCK_GET(lock);

                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        LDLM_DEBUG(lock, "prolong the busy lock");
                        ldlm_refresh_waiting_lock(lock,
                                                  ldlm_get_enq_timeout(lock));
                        cfs_spin_lock_bh(&waiting_locks_spinlock);

                        if (!cont) {
                                LDLM_LOCK_RELEASE(lock);
                                break;
                        }

                        LDLM_LOCK_RELEASE(lock);
                        continue;
                }
                ldlm_lock_to_ns(lock)->ns_timeouts++;
                LDLM_ERROR(lock, "lock callback timer expired after %lds: "
                           "evicting client at %s ",
                           cfs_time_current_sec() - lock->l_last_activity,
                           libcfs_nid2str(
                                   lock->l_export->exp_connection->c_peer.nid));

                /* no need to take an extra ref on the lock since it was in
                 * the waiting_locks_list and ldlm_add_waiting_lock()
                 * already grabbed a ref */
                cfs_list_del(&lock->l_pending_chain);
                cfs_list_add(&lock->l_pending_chain,
                             &expired_lock_thread.elt_expired_locks);
        }

        if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
                if (obd_dump_on_timeout)
                        expired_lock_thread.elt_dump = __LINE__;

                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!cfs_list_empty(&waiting_locks_list)) {
                cfs_time_t timeout_rounded;
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
}
/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 * As done by ldlm_add_waiting_lock(), the caller must grab a lock reference
 * if it has been added to the waiting list (1 is returned).
 *
 * Called with the namespace lock held.
 */
static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
{
        cfs_time_t timeout;
        cfs_time_t timeout_rounded;

        if (!cfs_list_empty(&lock->l_pending_chain))
                return 0;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
            OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
                seconds = 1;

        timeout = cfs_time_shift(seconds);
        if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
                lock->l_callback_timeout = timeout;

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (cfs_time_before(timeout_rounded,
                            cfs_timer_deadline(&waiting_locks_timer)) ||
            !cfs_timer_is_armed(&waiting_locks_timer)) {
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        /* if the new lock has a shorter timeout than something earlier on
           the list, we'll wait the longer amount of time; no big deal. */
        /* FIFO */
        cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
        return 1;
}
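
/*
 * Wrapper around __ldlm_add_waiting_lock() that takes the extra lock
 * reference the waiting list requires and also links the lock into its
 * export's exp_bl_list.
 */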
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        int ret;
        int timeout = ldlm_get_enq_timeout(lock);

        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (lock->l_destroyed) {
                static cfs_time_t next;
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
                if (cfs_time_after(cfs_time_current(), next)) {
                        next = cfs_time_shift(14400);
                        libcfs_debug_dumpstack(NULL);
                }
                return 0;
        }

        ret = __ldlm_add_waiting_lock(lock, timeout);
        if (ret) {
                /* grab ref on the lock if it has been added to the
                 * waiting list */
                LDLM_LOCK_GET(lock);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        if (ret) {
                cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
                if (cfs_list_empty(&lock->l_exp_list))
                        cfs_list_add(&lock->l_exp_list,
                                     &lock->l_export->exp_bl_list);
                cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
        }

        LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
                   ret == 0 ? "not re-" : "", timeout,
                   AT_OFF ? "off" : "on");
        return ret;
}
/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 * As done by ldlm_del_waiting_lock(), the caller must release the lock
 * reference when the lock is removed from any list (1 is returned).
 *
 * Called with namespace lock held.
 */
static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        cfs_list_t *list_next;

        if (cfs_list_empty(&lock->l_pending_chain))
                return 0;

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        cfs_timer_disarm(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = cfs_list_entry(list_next, struct ldlm_lock,
                                              l_pending_chain);
                        cfs_timer_arm(&waiting_locks_timer,
                                      round_timeout(next->l_callback_timeout));
                }
        }
        cfs_list_del_init(&lock->l_pending_chain);

        return 1;
}
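
/*
 * Unlink a lock from both the pending-timeout list and its export's
 * exp_bl_list, dropping the reference ldlm_add_waiting_lock() took when
 * the lock was actually removed from a list.
 */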
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        int ret;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        ret = __ldlm_del_waiting_lock(lock);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        /* remove the lock out of export blocking list */
        cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
        cfs_list_del_init(&lock->l_exp_list);
        cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);

        if (ret) {
                /* release lock ref if it has indeed been removed
                 * from a list */
                LDLM_LOCK_RELEASE(lock);
        }

        LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
        return ret;
}
/*
 * Prolong the lock
 *
 * Called with namespace lock held.
 */
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);

        if (cfs_list_empty(&lock->l_pending_chain)) {
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        /* we remove/add the lock to the waiting list, so no need to
         * release/take a lock reference */
        __ldlm_del_waiting_lock(lock);
        __ldlm_add_waiting_lock(lock, timeout);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        LDLM_DEBUG(lock, "refreshed");
        return 1;
}
#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        RETURN(0);
}
#endif /* __KERNEL__ */
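
/*
 * A client failed to reply to an AST within the timeout: log a console
 * error and queue the lock for expired_lock_thread, which will fail the
 * export (the non-kernel build fails the export inline instead).
 */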
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                            const char *ast_type)
{
        LCONSOLE_ERROR_MSG(0x138, "%s: A client on nid %s was evicted due "
                           "to a lock %s callback time out: rc %d\n",
                           lock->l_export->exp_obd->obd_name,
                           obd_export_nid2str(lock->l_export), ast_type, rc);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();
#ifdef __KERNEL__
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (__ldlm_del_waiting_lock(lock) == 0)
                /* the lock was not in any list, grab an extra ref before adding
                 * the lock to the expired list */
                LDLM_LOCK_GET(lock);
        cfs_list_add(&lock->l_pending_chain,
                     &expired_lock_thread.elt_expired_locks);
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
#else
        class_fail_export(lock->l_export);
#endif
}
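
/*
 * Classify the failure of an AST RPC.  Timeouts on a live export lead to
 * ldlm_failed_ast() and client eviction; errors reported by the client
 * (e.g. -EINVAL for a lock it no longer knows) are treated as normal
 * races: the lock is cancelled and -ERESTART tells the caller to
 * reprocess the resource.
 */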
static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else if (lock->l_flags & LDLM_FL_CANCEL) {
                        LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
                                   "cancel was received (AST reply lost?)",
                                   ast_type, libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        ldlm_del_waiting_lock(lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL) {
                        struct ldlm_resource *res = lock->l_resource;
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                                   " from %s AST - normal race",
                                   libcfs_nid2str(peer.nid),
                                   req->rq_repmsg ?
                                   lustre_msg_get_status(req->rq_repmsg) : -1,
                                   ast_type);
                        if (res) {
                                /* update lvbo to return proper attributes.
                                 * see bug 23174 */
                                ldlm_resource_getref(res);
                                ldlm_res_lvbo_update(res, NULL, 1);
                                ldlm_resource_putref(res);
                        }
                } else {
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", libcfs_nid2str(peer.nid),
                                   (req->rq_repmsg != NULL) ?
                                   lustre_msg_get_status(req->rq_repmsg) : 0,
                                   ast_type);
                }
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}
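
/*
 * Interpret callback for the blocking/completion ASTs queued by
 * ldlm_bl_and_cp_ast_tail(): route RPC errors through
 * ldlm_handle_ast_error() and wake the waiter once the count of
 * outstanding AST RPCs drops below the set's threshold.
 */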
static int ldlm_cb_interpret(const struct lu_env *env,
                             struct ptlrpc_request *req, void *data, int rc)
{
        struct ldlm_cb_async_args *ca   = data;
        struct ldlm_lock          *lock = ca->ca_lock;
        struct ldlm_cb_set_arg    *arg  = ca->ca_set_arg;
        ENTRY;

        LASSERT(lock != NULL);
        if (rc != 0) {
                rc = ldlm_handle_ast_error(lock, req, rc,
                                           arg->type == LDLM_BL_CALLBACK
                                           ? "blocking" : "completion");
                if (rc == -ERESTART)
                        cfs_atomic_inc(&arg->restart);
        }
        LDLM_LOCK_RELEASE(lock);

        if (cfs_atomic_dec_return(&arg->rpcs) < arg->threshold)
                cfs_waitq_signal(&arg->waitq);

        RETURN(0);
}
static inline int ldlm_bl_and_cp_ast_tail(struct ptlrpc_request *req,
                                          struct ldlm_cb_set_arg *arg,
                                          struct ldlm_lock *lock,
                                          int instant_cancel)
{
        int rc = 0;
        ENTRY;

        if (unlikely(instant_cancel)) {
                rc = ptl_send_rpc(req, 1);
                ptlrpc_req_finished(req);
                if (rc == 0)
                        cfs_atomic_inc(&arg->restart);
        } else {
                LDLM_LOCK_GET(lock);
                cfs_atomic_inc(&arg->rpcs);
                cfs_atomic_inc(&arg->refcount);
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        }

        RETURN(rc);
}
/**
 * Check if there are requests in the export request list which prevent
 * the lock canceling and make these requests high priority ones.
 */
static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        ENTRY;

        if (lock->l_export == NULL) {
                LDLM_DEBUG(lock, "client lock: no-op");
                RETURN_EXIT;
        }

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                /* Do not process requests that were not yet added to the
                 * incoming queue or were already removed from it for
                 * processing */
                if (!req->rq_hp && !cfs_list_empty(&req->rq_list) &&
                    req->rq_ops->hpreq_lock_match &&
                    req->rq_ops->hpreq_lock_match(req, lock))
                        ptlrpc_hpreq_reorder(req);
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        EXIT;
}
/*
 * ->l_blocking_ast() method for server-side locks. This is invoked when newly
 * enqueued server lock conflicts with given one.
 *
 * Sends blocking ast rpc to the client owning that lock; arms timeout timer
 * to wait for client response.
 */
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_cb_async_args *ca;
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        int                     instant_cancel = 0;
        int                     rc = 0;
        ENTRY;

        if (flag == LDLM_CB_CANCELING)
                /* Don't need to do anything here. */
                RETURN(0);

        LASSERT(lock);
        LASSERT(data != NULL);
        if (lock->l_export->exp_obd->obd_recovering != 0) {
                LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
                ldlm_lock_dump(D_ERROR, lock, 0);
        }

        ldlm_lock_reorder_req(lock);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_BL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;

        lock_res(lock->l_resource);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                unlock_res(lock->l_resource);
                ptlrpc_req_finished(req);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                unlock_res(lock->l_resource);
                ptlrpc_req_finished(req);
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
                instant_cancel = 1;

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_desc = *desc;
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");

        ptlrpc_request_set_replen(req);
        if (instant_cancel) {
                unlock_res(lock->l_resource);
                ldlm_lock_cancel(lock);
        } else {
                LASSERT(lock->l_granted_mode == lock->l_req_mode);
                ldlm_add_waiting_lock(lock);
                unlock_res(lock->l_resource);
        }

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_BL_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_bl_and_cp_ast_tail(req, arg, lock, instant_cancel);

        RETURN(rc);
}
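
/*
 * ->l_completion_ast() method for server-side locks: notify the client
 * that its enqueue is granted, carry the current LVB along, and feed the
 * measured enqueue wait time into the adaptive timeout estimate used by
 * __ldlm_add_waiting_lock().
 */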
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_cb_set_arg    *arg = data;
        struct ldlm_request       *body;
        struct ptlrpc_request     *req;
        struct ldlm_cb_async_args *ca;
        long                       total_enqueue_wait;
        int                        instant_cancel = 0;
        int                        rc = 0;
        ENTRY;

        LASSERT(lock != NULL);
        LASSERT(data != NULL);

        total_enqueue_wait = cfs_time_sub(cfs_time_current_sec(),
                                          lock->l_last_activity);

        req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
                                   &RQF_LDLM_CP_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* server namespace, doesn't need lock */
        if (lock->l_resource->lr_lvb_len) {
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
                                     lock->l_resource->lr_lvb_len);
        }

        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);

        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);
        if (lock->l_resource->lr_lvb_len) {
                void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);

                lock_res(lock->l_resource);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
                unlock_res(lock->l_resource);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
                   total_enqueue_wait);

        /* Server-side enqueue wait time estimate, used in
         * __ldlm_add_waiting_lock to set future enqueue timers */
        if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
                at_measured(ldlm_lock_to_ns_at(lock),
                            total_enqueue_wait);
        else
                /* bz18618. Don't add lock enqueue time we spend waiting for a
                   previous callback to fail. Locks waiting legitimately will
                   get extended by ldlm_refresh_waiting_lock regardless of the
                   estimate, so it's okay to underestimate here. */
                LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
                           "It is likely that a previous callback timed out.",
                           total_enqueue_wait,
                           at_get(ldlm_lock_to_ns_at(lock)));

        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        /* We only send real blocking ASTs after the lock is granted */
        lock_res_and_lock(lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                /* copy ast flags like LDLM_FL_DISCARD_DATA */
                body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

                /* We might get here prior to ldlm_handle_enqueue setting
                 * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
                 * into waiting list, but this is safe and similar code in
                 * ldlm_handle_enqueue will call ldlm_lock_cancel() still,
                 * that would not only cancel the lock, but will also remove
                 * it from waiting list */
                if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                        unlock_res_and_lock(lock);
                        ldlm_lock_cancel(lock);
                        instant_cancel = 1;
                        lock_res_and_lock(lock);
                } else {
                        /* start the lock-timeout clock */
                        ldlm_add_waiting_lock(lock);
                }
        }
        unlock_res_and_lock(lock);

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CP_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_bl_and_cp_ast_tail(req, arg, lock, instant_cancel);

        RETURN(rc);
}
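
/*
 * ->l_glimpse_ast() method for server-side locks: synchronously ask the
 * client holding the lock for an up-to-date LVB, then store either the
 * reply or (if the client lost the lock) the on-disk attributes via
 * ldlm_res_lvbo_update().
 */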
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_resource  *res = lock->l_resource;
        struct ldlm_request   *body;
        struct ptlrpc_request *req;
        int                    rc;
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_GL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        ldlm_lock2desc(lock, &body->lock_desc);

        /* server namespace, doesn't need lock */
        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             lock->l_resource->lr_lvb_len);
        res = lock->l_resource;
        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_GL_CALLBACK - LDLM_FIRST_OPC);

        rc = ptlrpc_queue_wait(req);
        /* Update the LVB from disk if the AST failed (this is a legal race)
         *
         * - Glimpse callback of local lock just return -ELDLM_NO_LOCK_DATA.
         * - Glimpse callback of remote lock might return -ELDLM_NO_LOCK_DATA
         *   when inode is cleared. LU-274
         */
        if (rc == -ELDLM_NO_LOCK_DATA) {
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
                ldlm_res_lvbo_update(res, NULL, 1);
        } else if (rc != 0) {
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
        } else {
                rc = ldlm_res_lvbo_update(res, req, 1);
        }

        ptlrpc_req_finished(req);
        if (rc == -ERESTART)
                ldlm_reprocess_all(res);

        RETURN(rc);
}
#ifdef __KERNEL__
extern unsigned long long lu_time_stamp_get(void);
#else
#define lu_time_stamp_get() time(NULL)
#endif

static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
                              struct lprocfs_stats *srv_stats)
{
        int lock_type = 0, op = 0;

        lock_type = dlm_req->lock_desc.l_resource.lr_type;

        switch (lock_type) {
        case LDLM_PLAIN:
                op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
                break;
        case LDLM_EXTENT:
                if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
                        op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
                else
                        op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
                break;
        case LDLM_FLOCK:
                op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
                break;
        case LDLM_IBITS:
                op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
                break;
        default:
                op = 0;
                break;
        }

        if (op)
                lprocfs_counter_incr(srv_stats, op);

        return;
}
/*
 * Main server-side entry point into LDLM. This is called by ptlrpc service
 * threads to carry out client lock enqueueing requests.
 */
int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
                         struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs)
{
        struct ldlm_reply *dlm_rep;
        __u32 flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        int rc = 0;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
        flags = dlm_req->lock_flags;

        LASSERT(req->rq_export);

        if (req->rq_rqbd->rqbd_service->srv_stats)
                ldlm_svc_get_eopc(dlm_req,
                                  req->rq_rqbd->rqbd_service->srv_stats);

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_ENQUEUE - LDLM_FIRST_OPC);

        if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
                     dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
                          dlm_req->lock_desc.l_resource.lr_type);
                GOTO(out, rc = -EFAULT);
        }

        if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
                     dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
                     dlm_req->lock_desc.l_req_mode &
                     (dlm_req->lock_desc.l_req_mode-1))) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
                          dlm_req->lock_desc.l_req_mode);
                GOTO(out, rc = -EFAULT);
        }

        if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
                if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                             LDLM_PLAIN)) {
                        DEBUG_REQ(D_ERROR, req,
                                  "PLAIN lock request from IBITS client?");
                        GOTO(out, rc = -EPROTO);
                }
        } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                            LDLM_IBITS)) {
                DEBUG_REQ(D_ERROR, req,
                          "IBITS lock request from unaware client?");
                GOTO(out, rc = -EPROTO);
        }

#if 0
        /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
           against server's _CONNECT_SUPPORTED flags? (I don't want to use
           ibits for mgc/mgs) */

        /* INODEBITS_INTEROP: Perform conversion from plain lock to
         * inodebits lock if client does not support them. */
        if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
            (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
                dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
                dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
                        MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
                if (dlm_req->lock_desc.l_req_mode == LCK_PR)
                        dlm_req->lock_desc.l_req_mode = LCK_CR;
        }
#endif

        if (unlikely(flags & LDLM_FL_REPLAY)) {
                /* Find an existing lock in the per-export lock hash */
                lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
                                       (void *)&dlm_req->lock_handle[0]);
                if (lock != NULL) {
                        DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
                                  LPX64, lock->l_handle.h_cookie);
                        GOTO(existing_lock, rc = 0);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                cbs, NULL, 0);
        if (!lock)
                GOTO(out, rc = -ENOMEM);

        lock->l_last_activity = cfs_time_current_sec();
        lock->l_remote_handle = dlm_req->lock_handle[0];
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
        /* Don't enqueue a lock onto the export if it has already been
         * disconnected due to eviction (bug 3822) or server umount (bug 24324).
         * Cancel it now instead. */
        if (req->rq_export->exp_disconnected) {
                LDLM_ERROR(lock, "lock on disconnected export %p",
                           req->rq_export);
                GOTO(out, rc = -ENOTCONN);
        }

        lock->l_export = class_export_lock_get(req->rq_export, lock);
        if (lock->l_export->exp_lock_hash)
                cfs_hash_add(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle,
                             &lock->l_exp_hash);

existing_lock:

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                /* based on the assumption that lvb size never changes during
                 * resource life time otherwise it need resource->lr_lock's
                 * protection */
                if (lock->l_resource->lr_lvb_len) {
                        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
                                             RCL_SERVER,
                                             lock->l_resource->lr_lvb_len);
                }

                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = req_capsule_server_pack(&req->rq_pill);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                ldlm_convert_policy_to_local(
                                          dlm_req->lock_desc.l_resource.lr_type,
                                          &dlm_req->lock_desc.l_policy_data,
                                          &lock->l_policy_data);
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                lock->l_req_extent = lock->l_policy_data.l_extent;

        err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
        if (err)
                GOTO(out, err);

        dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        lock_res_and_lock(lock);

        /* Now take into account flags to be inherited from original lock
           request both in reply to client and in our own lock flags. */
        dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
        lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;

        /* Don't move a pending lock onto the export if it has already been
         * disconnected due to eviction (bug 5683) or server umount (bug 24324).
         * Cancel it now instead. */
        if (unlikely(req->rq_export->exp_disconnected ||
                     OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
                LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                rc = -ENOTCONN;
        } else if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode) {
                        /*
                         * Only cancel lock if it was granted, because it would
                         * be destroyed immediately and would never be granted
                         * in the future, causing timeouts on client.  Not
                         * granted lock will be cancelled immediately after
                         * sending completion AST.
                         */
                        if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                                unlock_res_and_lock(lock);
                                ldlm_lock_cancel(lock);
                                lock_res_and_lock(lock);
                        } else
                                ldlm_add_waiting_lock(lock);
                }
        }
        /* Make sure we never ever grant usual metadata locks to liblustre
           clients */
        if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
            dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
             req->rq_export->exp_libclient) {
                if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
                             !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
                        CERROR("Granting sync lock to libclient. "
                               "req fl %d, rep fl %d, lock fl "LPX64"\n",
                               dlm_req->lock_flags, dlm_rep->lock_flags,
                               lock->l_flags);
                        LDLM_ERROR(lock, "sync lock");
                        if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
                                struct ldlm_intent *it;

                                it = req_capsule_client_get(&req->rq_pill,
                                                            &RMF_LDLM_INTENT);
                                if (it != NULL) {
                                        CERROR("This is intent %s ("LPU64")\n",
                                               ldlm_it2str(it->opc), it->opc);
                                }
                        }
                }
        }

        unlock_res_and_lock(lock);

        EXIT;
 out:
        req->rq_status = rc ?: err; /* return either error - bug 11190 */
        if (!req->rq_packed_final) {
                err = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc == 0)
                        rc = err;
        }

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d, rc=%d)", err, rc);

                if (rc == 0) {
                        if (lock->l_resource->lr_lvb_len > 0) {
                                /* MDT path won't handle lr_lvb_data, so
                                 * lock/unlock better be contained in the
                                 * if block */
                                void *lvb;

                                lvb = req_capsule_server_get(&req->rq_pill,
                                                             &RMF_DLM_LVB);
                                LASSERTF(lvb != NULL, "req %p, lock %p\n",
                                         req, lock);
                                lock_res(lock->l_resource);
                                memcpy(lvb, lock->l_resource->lr_lvb_data,
                                       lock->l_resource->lr_lvb_len);
                                unlock_res(lock->l_resource);
                        }
                } else {
                        lock_res_and_lock(lock);
                        ldlm_resource_unlink_lock(lock);
                        ldlm_lock_destroy_nolock(lock);
                        unlock_res_and_lock(lock);
                }

                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);

                LDLM_LOCK_RELEASE(lock);
        }

        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
                          lock, rc);

        return rc;
}
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
{
        struct ldlm_request *dlm_req;
        struct ldlm_callback_suite cbs = {
                .lcs_completion = completion_callback,
                .lcs_blocking   = blocking_callback,
                .lcs_glimpse    = glimpse_callback
        };
        int rc;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req != NULL) {
                rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
                                          req, dlm_req, &cbs);
        } else {
                rc = -EFAULT;
        }
        return rc;
}
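
/*
 * Server-side handler for LDLM_CONVERT requests: look the lock up by
 * handle and attempt the mode conversion via ldlm_lock_convert().
 */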
int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req)
{
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc;
        ENTRY;

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CONVERT - LDLM_FIRST_OPC);

        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc)
                RETURN(rc);

        dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                void *res = NULL;

                LDLM_DEBUG(lock, "server-side convert handler START");

                lock->l_last_activity = cfs_time_current_sec();
                res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                        &dlm_rep->lock_flags);
                if (res) {
                        if (ldlm_del_waiting_lock(lock))
                                LDLM_DEBUG(lock, "converted waiting lock");
                        req->rq_status = 0;
                } else {
                        req->rq_status = EDEADLOCK;
                }
        }

        if (lock) {
                if (!req->rq_status)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        int rc;
        struct ldlm_request *dlm_req;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req != NULL) {
                rc = ldlm_handle_convert0(req, dlm_req);
        } else {
                CERROR("Can't unpack dlm_req\n");
                rc = -EFAULT;
        }
        return rc;
}
/* Cancel all the locks whose handles are packed into ldlm_request */
int ldlm_request_cancel(struct ptlrpc_request *req,
                        const struct ldlm_request *dlm_req, int first)
{
        struct ldlm_resource *res, *pres = NULL;
        struct ldlm_lock *lock;
        int i, count, done = 0;
        ENTRY;

        count = dlm_req->lock_count ? dlm_req->lock_count : 1;
        if (first >= count)
                RETURN(0);

        /* There is no lock on the server at the replay time,
         * skip lock cancelling to make replay tests pass. */
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                RETURN(0);

        LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
                          "starting at %d", count, first);

        for (i = first; i < count; i++) {
                lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
                if (!lock) {
                        LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
                                          "lock (cookie "LPU64")",
                                          dlm_req->lock_handle[i].cookie);
                        continue;
                }

                res = lock->l_resource;
                done++;

                if (res != pres) {
                        if (pres != NULL) {
                                ldlm_reprocess_all(pres);
                                LDLM_RESOURCE_DELREF(pres);
                                ldlm_resource_putref(pres);
                        }
                        if (res != NULL) {
                                ldlm_resource_getref(res);
                                LDLM_RESOURCE_ADDREF(res);
                                ldlm_res_lvbo_update(res, NULL, 1);
                        }
                        pres = res;
                }
                ldlm_lock_cancel(lock);
                LDLM_LOCK_PUT(lock);
        }
        if (pres != NULL) {
                ldlm_reprocess_all(pres);
                LDLM_RESOURCE_DELREF(pres);
                ldlm_resource_putref(pres);
        }
        LDLM_DEBUG_NOLOCK("server-side cancel handler END");
        RETURN(done);
}
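
/*
 * Server-side handler for LDLM_CANCEL requests; the per-handle work is
 * done by ldlm_request_cancel() above.
 */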
int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        int rc;
        ENTRY;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL) {
                CDEBUG(D_INFO, "bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CANCEL - LDLM_FIRST_OPC);

        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc)
                RETURN(rc);

        if (!ldlm_request_cancel(req, dlm_req, 0))
                req->rq_status = ESTALE;

        RETURN(ptlrpc_reply(req));
}
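
/*
 * Client-side handler for a server's blocking AST: mark the lock
 * CBPENDING and, if it is no longer in use, run its blocking callback
 * immediately; otherwise the cancel happens when the last user drops
 * the lock.
 */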
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        LDLM_DEBUG(lock, "client blocking AST callback handler");

        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;

        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
                lock->l_flags |= LDLM_FL_CANCEL;

        do_ast = (!lock->l_readers && !lock->l_writers);
        unlock_res_and_lock(lock);

        if (do_ast) {
                CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
                       lock, lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL)
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
        } else {
                CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
                       lock);
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        LDLM_LOCK_RELEASE(lock);
        EXIT;
}
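
/*
 * Client-side handler for a server's completion AST: the granted mode,
 * policy data, resource name and LVB may all differ from what was
 * originally requested, so update the lock before granting it locally
 * and running the accumulated AST work.
 */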
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        CFS_LIST_HEAD(ast_list);
        ENTRY;

        LDLM_DEBUG(lock, "client completion callback handler START");

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
                int to = cfs_time_seconds(1);
                while (to > 0) {
                        cfs_schedule_timeout_and_set_state(
                                CFS_TASK_INTERRUPTIBLE, to);
                        if (lock->l_granted_mode == lock->l_req_mode ||
                            lock->l_destroyed)
                                break;
                }
        }

        lock_res_and_lock(lock);
        if (lock->l_destroyed ||
            lock->l_granted_mode == lock->l_req_mode) {
                /* bug 11300: the lock has already been granted */
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "Double grant race happened");
                LDLM_LOCK_RELEASE(lock);
                EXIT;
                return;
        }

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                ldlm_convert_policy_to_local(
                                          dlm_req->lock_desc.l_resource.lr_type,
                                          &dlm_req->lock_desc.l_policy_data,
                                          &lock->l_policy_data);
                LDLM_DEBUG(lock, "completion AST, new policy data");
        }

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                unlock_res_and_lock(lock);
                if (ldlm_lock_change_resource(ns, lock,
                                &dlm_req->lock_desc.l_resource.lr_name) != 0) {
                        LDLM_ERROR(lock, "Failed to allocate resource");
                        LDLM_LOCK_RELEASE(lock);
                        EXIT;
                        return;
                }
                LDLM_DEBUG(lock, "completion AST, new resource");
                CERROR("change resource!\n");
                lock_res_and_lock(lock);
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                /* BL_AST locks are not needed in lru.
                 * let ldlm_cancel_lru() be fast. */
                ldlm_lock_remove_from_lru(lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        if (lock->l_lvb_len) {
                if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                         RCL_CLIENT) < lock->l_lvb_len) {
                        LDLM_ERROR(lock, "completion AST did not contain "
                                   "expected LVB!");
                } else {
                        void *lvb = req_capsule_client_get(&req->rq_pill,
                                                           &RMF_DLM_LVB);
                        memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
                }
        }

        ldlm_grant_lock(lock, &ast_list);
        unlock_res_and_lock(lock);

        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");

        /* Let Enqueue call osc_lock_upcall() and initialize
         * l_ast_data */
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);

        ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        LDLM_LOCK_RELEASE(lock);
        EXIT;
}
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;
        ENTRY;

        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL)
                rc = lock->l_glimpse_ast(lock, req);

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        lock_res_and_lock(lock);
        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            cfs_time_after(cfs_time_current(),
                           cfs_time_add(lock->l_last_used,
                                        cfs_time_seconds(10)))) {
                unlock_res_and_lock(lock);
                if (ldlm_bl_to_thread_lock(ns, NULL, lock))
                        ldlm_handle_bl_callback(ns, NULL, lock);

                EXIT;
                return;
        }
        unlock_res_and_lock(lock);
        LDLM_LOCK_RELEASE(lock);
        EXIT;
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        if (req->rq_no_reply)
                return 0;

        req->rq_status = rc;
        if (!req->rq_packed_final) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        return rc;
        }
        return ptlrpc_reply(req);
}
#ifdef __KERNEL__
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
{
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        ENTRY;

        cfs_spin_lock(&blp->blp_lock);
        if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
        } else {
                /* other blocking callbacks are added to the regular list */
                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        }
        cfs_spin_unlock(&blp->blp_lock);

        cfs_waitq_signal(&blp->blp_waitq);

        /* can not use blwi->blwi_mode as blwi could be already freed in
           LDLM_ASYNC mode */
        if (mode == LDLM_SYNC)
                cfs_wait_for_completion(&blwi->blwi_comp);

        RETURN(0);
}
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
                             struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld,
                             cfs_list_t *cancels, int count,
                             struct ldlm_lock *lock,
                             int mode)
{
        cfs_init_completion(&blwi->blwi_comp);
        CFS_INIT_LIST_HEAD(&blwi->blwi_head);

        if (cfs_memory_pressure_get())
                blwi->blwi_mem_pressure = 1;

        blwi->blwi_ns = ns;
        blwi->blwi_mode = mode;
        if (ld != NULL)
                blwi->blwi_ld = *ld;
        if (count) {
                cfs_list_add(&blwi->blwi_head, cancels);
                cfs_list_del_init(cancels);
                blwi->blwi_count = count;
        } else {
                blwi->blwi_lock = lock;
        }
}
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
                             cfs_list_t *cancels, int count, int mode)
{
        ENTRY;

        if (cancels && count == 0)
                RETURN(0);

        if (mode == LDLM_SYNC) {
                /* if it is synchronous call do minimum mem alloc, as it could
                 * be triggered from kernel shrinker
                 */
                struct ldlm_bl_work_item blwi;
                memset(&blwi, 0, sizeof(blwi));
                init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
                RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
        } else {
                struct ldlm_bl_work_item *blwi;
                OBD_ALLOC(blwi, sizeof(*blwi));
                if (blwi == NULL)
                        RETURN(-ENOMEM);
                init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);

                RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
        }
}
#endif

int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                           struct ldlm_lock *lock)
{
#ifdef __KERNEL__
        RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
#else
        RETURN(-ENOSYS);
#endif
}

int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                           cfs_list_t *cancels, int count, int mode)
{
#ifdef __KERNEL__
        RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
#else
        RETURN(-ENOSYS);
#endif
}
/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
static int ldlm_handle_setinfo(struct ptlrpc_request *req)
{
        struct obd_device *obd = req->rq_export->exp_obd;
        char *key;
        void *val;
        int keylen, vallen;
        int rc = -ENOSYS;
        ENTRY;

        DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);

        req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);

        key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_IOCTL, req, "no set_info key");
                RETURN(-EFAULT);
        }
        keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);
        val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_IOCTL, req, "no set_info val");
                RETURN(-EFAULT);
        }
        vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
                                      RCL_CLIENT);

        /* We are responsible for swabbing contents of val */

        if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
                /* Pass it on to mdc (the "export" in this case) */
                rc = obd_set_info_async(req->rq_export,
                                        sizeof(KEY_HSM_COPYTOOL_SEND),
                                        KEY_HSM_COPYTOOL_SEND,
                                        vallen, val, NULL);
        else
                DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);

        return rc;
}
static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
                                        const char *msg, int rc,
                                        struct lustre_handle *handle)
{
        DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
                  "%s: [nid %s] [rc %d] [lock "LPX64"]",
                  msg, libcfs_id2str(req->rq_peer), rc,
                  handle ? handle->cookie : 0);
        if (req->rq_no_reply)
                CWARN("No reply was sent, possibly because of bug 21636.\n");
        else if (rc)
                CWARN("Sending the reply failed, possibly because of bug 21636.\n");
}
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        /* do nothing for sec context finalize */
        if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
                RETURN(0);

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);

        if (req->rq_export == NULL) {
                rc = ldlm_callback_reply(req, -ENOTCONN);
                ldlm_callback_errmsg(req, "Operate on unconnected server",
                                     rc, NULL);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case LDLM_BL_CALLBACK:
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
                        RETURN(0);
                break;
        case LDLM_CP_CALLBACK:
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
                        RETURN(0);
                break;
        case LDLM_GL_CALLBACK:
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
                        RETURN(0);
                break;
        case LDLM_SET_INFO:
                rc = ldlm_handle_setinfo(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
                CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
                req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
                        RETURN(0);
                rc = llog_origin_handle_cancel(req);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
                        RETURN(0);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case OBD_QC_CALLBACK:
                req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
                        RETURN(0);
                rc = target_handle_qc_callback(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case QUOTA_DQACQ:
        case QUOTA_DQREL:
                /* reply in handler */
                req_capsule_set(&req->rq_pill, &RQF_MDS_QUOTA_DQACQ);
                rc = target_handle_dqacq_callback(req);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CREATE:
                req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_create(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                req_capsule_set(&req->rq_pill,
                                &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                req_capsule_set(&req->rq_pill,
                                &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("unknown opcode %u\n",
                       lustre_msg_get_opc(req->rq_reqmsg));
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL) {
                rc = ldlm_callback_reply(req, -EPROTO);
                ldlm_callback_errmsg(req, "Operate without parameter", rc,
                                     NULL);
                RETURN(0);
        }

        /* Force a known safe race, send a cancel to the server for a lock
         * which the server has already started a blocking callback on. */
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
            lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
                rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
                if (rc < 0)
                        CERROR("ldlm_cli_cancel: %d\n", rc);
        }

        lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
        if (!lock) {
                CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
                       "disappeared\n", dlm_req->lock_handle[0].cookie);
                rc = ldlm_callback_reply(req, -EINVAL);
                ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
                                     &dlm_req->lock_handle[0]);
                RETURN(0);
        }

        if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
            lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
                OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock_res_and_lock(lock);
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
        if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
                /* If somebody cancels lock and cache is already dropped,
                 * or lock is failed before cp_ast received on client,
                 * we can tell the server we have no lock. Otherwise, we
                 * should send cancel after dropping the cache. */
                if (((lock->l_flags & LDLM_FL_CANCELING) &&
                     (lock->l_flags & LDLM_FL_BL_DONE)) ||
                    (lock->l_flags & LDLM_FL_FAILED)) {
                        LDLM_DEBUG(lock, "callback on lock "
                                   LPX64" - lock disappeared\n",
                                   dlm_req->lock_handle[0].cookie);
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_RELEASE(lock);
                        rc = ldlm_callback_reply(req, -EINVAL);
                        ldlm_callback_errmsg(req, "Operate on stale lock", rc,
                                             &dlm_req->lock_handle[0]);
                        RETURN(0);
                }
                /* BL_AST locks are not needed in lru.
                 * let ldlm_cancel_lru() be fast. */
                ldlm_lock_remove_from_lru(lock);
                lock->l_flags |= LDLM_FL_BL_AST;
        }
        unlock_res_and_lock(lock);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
                req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
                if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
                        rc = ldlm_callback_reply(req, 0);
                        if (req->rq_no_reply || rc)
                                ldlm_callback_errmsg(req, "Normal process", rc,
                                                     &dlm_req->lock_handle[0]);
                }
                if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
                        ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG(); /* checked above */
        }

        RETURN(0);
}
static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CERROR("%s from %s arrived at %lu with bad export cookie "
                       LPU64"\n",
                       ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
                       libcfs_nid2str(req->rq_peer.nid),
                       req->rq_arrival_time.tv_sec,
                       lustre_msg_get_handle(req->rq_reqmsg)->cookie);

                if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
                        req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
                        dlm_req = req_capsule_client_get(&req->rq_pill,
                                                         &RMF_DLM_REQ);
                        if (dlm_req != NULL)
                                ldlm_lock_dump_handle(D_ERROR,
                                                      &dlm_req->lock_handle[0]);
                }
                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
                CDEBUG(D_INODE, "cancel\n");
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
                        RETURN(0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);
        case OBD_LOG_CANCEL:
                req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
                        RETURN(0);
                rc = llog_origin_handle_cancel(req);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
                        RETURN(0);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("invalid opcode %d\n",
                       lustre_msg_get_opc(req->rq_reqmsg));
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
                ldlm_callback_reply(req, -EINVAL);
        }

        RETURN(0);
}
static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
                                        struct ldlm_lock *lock)
{
        struct ldlm_request *dlm_req;
        struct lustre_handle lockh;
        int rc = 0;
        int i;
        ENTRY;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL)
                RETURN(0);

        ldlm_lock2handle(lock, &lockh);
        for (i = 0; i < dlm_req->lock_count; i++) {
                if (lustre_handle_equal(&dlm_req->lock_handle[i],
                                        &lockh)) {
                        DEBUG_REQ(D_RPCTRACE, req,
                                  "Prio raised by lock "LPX64".", lockh.cookie);
                        rc = 1;
                        break;
                }
        }

        RETURN(rc);
}
static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        int rc = 0;
        int i;
        ENTRY;

        /* no prolong in recovery */
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                RETURN(0);

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL)
                RETURN(-EFAULT);

        for (i = 0; i < dlm_req->lock_count; i++) {
                struct ldlm_lock *lock;

                lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
                if (lock == NULL)
                        continue;

                rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
                if (rc)
                        LDLM_DEBUG(lock, "hpreq cancel lock");
                LDLM_LOCK_PUT(lock);

                if (rc)
                        break;
        }

        RETURN(rc);
}
static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
        .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
        .hpreq_check      = ldlm_cancel_hpreq_check
};
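
/* Called for each request arriving on the "ldlm_canceld" service to give
 * LDLM_CANCEL requests a chance at high-priority handling via
 * ldlm_cancel_hpreq_ops. */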
static int ldlm_hpreq_handler(struct ptlrpc_request *req)
{
        ENTRY;

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);

        if (req->rq_export == NULL)
                RETURN(0);

        if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
                req->rq_ops = &ldlm_cancel_hpreq_ops;
        }
        RETURN(0);
}
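
/* cfs_hash iterator callback for ldlm_revoke_export_locks(): collect each
 * granted IBITS/PLAIN lock that has not yet had a blocking AST sent, mark
 * it LDLM_FL_AST_SENT, unhash it from the export's lock hash, and add it
 * (with a reference held) to the rpc_list passed in @data. */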
int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                        cfs_hlist_node_t *hnode, void *data)
{
        cfs_list_t       *rpc_list = data;
        struct ldlm_lock *lock = cfs_hash_object(hs, hnode);

        lock_res_and_lock(lock);

        if (lock->l_req_mode != lock->l_granted_mode) {
                unlock_res_and_lock(lock);
                return 0;
        }

        LASSERT(lock->l_resource);
        if (lock->l_resource->lr_type != LDLM_IBITS &&
            lock->l_resource->lr_type != LDLM_PLAIN) {
                unlock_res_and_lock(lock);
                return 0;
        }

        if (lock->l_flags & LDLM_FL_AST_SENT) {
                unlock_res_and_lock(lock);
                return 0;
        }

        LASSERT(lock->l_blocking_ast);
        LASSERT(!lock->l_blocking_lock);

        lock->l_flags |= LDLM_FL_AST_SENT;
        if (lock->l_export && lock->l_export->exp_lock_hash &&
            !cfs_hlist_unhashed(&lock->l_exp_hash))
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);
        cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
        LDLM_LOCK_GET(lock);

        unlock_res_and_lock(lock);
        return 0;
}
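
/* Revoke all granted locks held by @exp by collecting them from the
 * export's lock hash and sending blocking ASTs for the whole batch. */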
void ldlm_revoke_export_locks(struct obd_export *exp)
{
        cfs_list_t rpc_list;
        ENTRY;

        CFS_INIT_LIST_HEAD(&rpc_list);
        cfs_hash_for_each_empty(exp->exp_lock_hash,
                                ldlm_revoke_lock_cb, &rpc_list);
        ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
                          LDLM_WORK_REVOKE_AST);

        EXIT;
}
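
/* Dequeue one blocking-callback work item. Priority items are preferred,
 * but the regular blp_list is still served at least once every
 * blp_num_threads dequeues (when the static num_bl counter wraps to
 * zero), so it cannot be starved. */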
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;
        static unsigned int num_bl = 0;

        cfs_spin_lock(&blp->blp_lock);
        /* process a request from the blp_list at least every blp_num_threads */
        if (!cfs_list_empty(&blp->blp_list) &&
            (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
                blwi = cfs_list_entry(blp->blp_list.next,
                                      struct ldlm_bl_work_item, blwi_entry);
        else if (!cfs_list_empty(&blp->blp_prio_list))
                blwi = cfs_list_entry(blp->blp_prio_list.next,
                                      struct ldlm_bl_work_item,
                                      blwi_entry);

        if (blwi) {
                if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
                        num_bl = 0;
                cfs_list_del(&blwi->blwi_entry);
        }
        cfs_spin_unlock(&blp->blp_lock);

        return blwi;
}

/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
        char                 bltd_name[CFS_CURPROC_COMM_MAX];
        struct ldlm_bl_pool *bltd_blp;
        cfs_completion_t     bltd_comp;
        int                  bltd_num;
};

static int ldlm_bl_thread_main(void *arg);
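
/* Spawn one blocking-callback thread. The ldlm_bl_thread_data handoff
 * lives on this stack frame, so wait for the new thread to copy what it
 * needs and signal bltd_comp before returning. */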
static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
        int rc;

        cfs_init_completion(&bltd.bltd_comp);
        rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
        if (rc < 0) {
                CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
                       cfs_atomic_read(&blp->blp_num_threads), rc);
                return rc;
        }
        cfs_wait_for_completion(&bltd.bltd_comp);
        return 0;
}
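
/* Main loop of a blocking-callback ("ldlm_bl_xx") thread: pull work items
 * from the pool, spawning an extra thread when all current threads are
 * busy, and exit when ldlm_cleanup() queues a poison item with a NULL
 * namespace. */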
static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_pool *blp;
        ENTRY;

        {
                struct ldlm_bl_thread_data *bltd = arg;

                blp = bltd->bltd_blp;

                bltd->bltd_num =
                        cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
                cfs_atomic_inc(&blp->blp_busy_threads);

                snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
                         "ldlm_bl_%02d", bltd->bltd_num);
                cfs_daemonize(bltd->bltd_name);

                cfs_complete(&bltd->bltd_comp);
                /* cannot use bltd after this, it is only on caller's stack */
        }

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;
                int busy;

                blwi = ldlm_bl_get_work(blp);

                if (blwi == NULL) {
                        cfs_atomic_dec(&blp->blp_busy_threads);
                        l_wait_event_exclusive(blp->blp_waitq,
                                         (blwi = ldlm_bl_get_work(blp)) != NULL,
                                         &lwi);
                        busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
                } else {
                        busy = cfs_atomic_read(&blp->blp_busy_threads);
                }

                if (blwi->blwi_ns == NULL)
                        /* added by ldlm_cleanup() */
                        break;

                /* Not fatal if racy and have a few too many threads */
                if (unlikely(busy < blp->blp_max_threads &&
                             busy >= cfs_atomic_read(&blp->blp_num_threads) &&
                             !blwi->blwi_mem_pressure))
                        /* discard the return value, we tried */
                        ldlm_bl_thread_start(blp);

                if (blwi->blwi_mem_pressure)
                        cfs_memory_pressure_set();

                if (blwi->blwi_count) {
                        int count;
                        /* The special case when we cancel locks in lru
                         * asynchronously, we pass the list of locks here.
                         * Thus locks are marked LDLM_FL_CANCELING, but NOT
                         * canceled locally yet. */
                        count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
                                                           blwi->blwi_count,
                                                           LCF_BL_AST);
                        ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
                } else {
                        ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                                blwi->blwi_lock);
                }
                if (blwi->blwi_mem_pressure)
                        cfs_memory_pressure_clr();

                if (blwi->blwi_mode == LDLM_ASYNC)
                        OBD_FREE(blwi, sizeof(*blwi));
                else
                        cfs_complete(&blwi->blwi_comp);
        }

        cfs_atomic_dec(&blp->blp_busy_threads);
        cfs_atomic_dec(&blp->blp_num_threads);
        cfs_complete(&blp->blp_comp);
        RETURN(0);
}

static int ldlm_setup(void);
static int ldlm_cleanup(void);
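
/*
 * Reference counting for the LDLM as a whole: the first ldlm_get_ref()
 * runs ldlm_setup(), the last ldlm_put_ref() runs ldlm_cleanup().
 *
 * Illustrative usage sketch (hypothetical caller):
 *
 *      rc = ldlm_get_ref();
 *      if (rc != 0)
 *              return rc;
 *      ...enqueue and cancel locks...
 *      ldlm_put_ref();
 */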
int ldlm_get_ref(void)
{
        int rc = 0;
        ENTRY;
        cfs_mutex_lock(&ldlm_ref_mutex);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        cfs_mutex_unlock(&ldlm_ref_mutex);

        RETURN(rc);
}

void ldlm_put_ref(void)
{
        ENTRY;
        cfs_mutex_lock(&ldlm_ref_mutex);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup();
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        cfs_mutex_unlock(&ldlm_ref_mutex);

        EXIT;
}

/*
 * Export handle<->lock hash operations.
 */
static unsigned
ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
{
        return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
}

static void *
ldlm_export_lock_key(cfs_hlist_node_t *hnode)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        return &lock->l_remote_handle;
}

static void
ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        lock->l_remote_handle = *(struct lustre_handle *)key;
}

static int
ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
}

static void *
ldlm_export_lock_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
}

static void
ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        LDLM_LOCK_GET(lock);
}

static void
ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        LDLM_LOCK_RELEASE(lock);
}

static cfs_hash_ops_t ldlm_export_lock_ops = {
        .hs_hash        = ldlm_export_lock_hash,
        .hs_key         = ldlm_export_lock_key,
        .hs_keycmp      = ldlm_export_lock_keycmp,
        .hs_keycpy      = ldlm_export_lock_keycpy,
        .hs_object      = ldlm_export_lock_object,
        .hs_get         = ldlm_export_lock_get,
        .hs_put         = ldlm_export_lock_put,
        .hs_put_locked  = ldlm_export_lock_put,
};
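
/* Create the per-export hash table that maps a client's remote lock
 * handle to the corresponding ldlm_lock, using the operations above. */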
int ldlm_init_export(struct obd_export *exp)
{
        ENTRY;

        exp->exp_lock_hash =
                cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
                                HASH_EXP_LOCK_CUR_BITS,
                                HASH_EXP_LOCK_MAX_BITS,
                                HASH_EXP_LOCK_BKT_BITS, 0,
                                CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                                &ldlm_export_lock_ops,
                                CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
                                CFS_HASH_NBLK_CHANGE);

        if (!exp->exp_lock_hash)
                RETURN(-ENOMEM);

        RETURN(0);
}
EXPORT_SYMBOL(ldlm_init_export);

void ldlm_destroy_export(struct obd_export *exp)
{
        ENTRY;
        cfs_hash_putref(exp->exp_lock_hash);
        exp->exp_lock_hash = NULL;
        EXIT;
}
EXPORT_SYMBOL(ldlm_destroy_export);
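
/* Bring up the LDLM: the "ldlm_cbd" and "ldlm_canceld" ptlrpc services,
 * the blocking-callback thread pool, the waiting-locks timer and
 * expired-lock thread, and the lock pools. Called once, on the first
 * ldlm_get_ref(). */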
static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
        int ldlm_min_threads = LDLM_THREADS_AUTO_MIN;
        int ldlm_max_threads = LDLM_THREADS_AUTO_MAX;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm_state != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)
                RETURN(-ENOMEM);

#ifdef LPROCFS
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);
#endif

#ifdef __KERNEL__
        if (ldlm_num_threads) {
                /* If ldlm_num_threads is set, it is the min and the max. */
                if (ldlm_num_threads > LDLM_THREADS_AUTO_MAX)
                        ldlm_num_threads = LDLM_THREADS_AUTO_MAX;
                if (ldlm_num_threads < LDLM_THREADS_AUTO_MIN)
                        ldlm_num_threads = LDLM_THREADS_AUTO_MIN;
                ldlm_min_threads = ldlm_max_threads = ldlm_num_threads;
        }
#endif

        ldlm_state->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
                                LDLM_CB_REPLY_PORTAL, 2,
                                ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir, NULL,
                                ldlm_min_threads, ldlm_max_threads,
                                "ldlm_cb",
                                LCT_MD_THREAD|LCT_DT_THREAD, NULL);

        if (!ldlm_state->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm_state->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL, 6,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir, NULL,
                                ldlm_min_threads, ldlm_max_threads,
                                "ldlm_cn",
                                LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
                                ldlm_hpreq_handler);

        if (!ldlm_state->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        cfs_spin_lock_init(&blp->blp_lock);
        CFS_INIT_LIST_HEAD(&blp->blp_list);
        CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
        cfs_waitq_init(&blp->blp_waitq);
        cfs_atomic_set(&blp->blp_num_threads, 0);
        cfs_atomic_set(&blp->blp_busy_threads, 0);
        blp->blp_min_threads = ldlm_min_threads;
        blp->blp_max_threads = ldlm_max_threads;

#ifdef __KERNEL__
        for (i = 0; i < blp->blp_min_threads; i++) {
                rc = ldlm_bl_thread_start(blp);
                if (rc < 0)
                        GOTO(out_thread, rc);
        }

        rc = ptlrpc_start_threads(ldlm_state->ldlm_cancel_service);
        if (rc)
                GOTO(out_thread, rc);

        rc = ptlrpc_start_threads(ldlm_state->ldlm_cb_service);
        if (rc)
                GOTO(out_thread, rc);

        CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_init(&expired_lock_thread.elt_waitq);

        CFS_INIT_LIST_HEAD(&waiting_locks_list);
        cfs_spin_lock_init(&waiting_locks_spinlock);
        cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);

        rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        cfs_wait_event(expired_lock_thread.elt_waitq,
                       expired_lock_thread.elt_state == ELT_READY);

        rc = ldlm_pools_init();
        if (rc)
                GOTO(out_thread, rc);
#endif
        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#endif

 out_proc:
#ifdef LPROCFS
        ldlm_proc_cleanup();
 out_free:
#endif
        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;
        RETURN(rc);
}
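
/* Tear down everything ldlm_setup() created. Each blocking-callback
 * thread is stopped by queueing a poison work item (blwi_ns == NULL) and
 * waiting for it to complete blp_comp. Fails if any namespace is still
 * registered. */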
static int ldlm_cleanup(void)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
#endif
        ENTRY;

        if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
            !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        ldlm_pools_fini();

        while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                cfs_init_completion(&blp->blp_comp);

                cfs_spin_lock(&blp->blp_lock);
                cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                cfs_waitq_signal(&blp->blp_waitq);
                cfs_spin_unlock(&blp->blp_lock);

                cfs_wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        cfs_wait_event(expired_lock_thread.elt_waitq,
                       expired_lock_thread.elt_state == ELT_STOPPED);
#else /* !__KERNEL__ */
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif /* __KERNEL__ */

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}
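
/* One-time module initialization: locking primitives and the slab caches
 * for resources, locks and interval nodes. */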
int ldlm_init(void)
{
        cfs_mutex_init(&ldlm_ref_mutex);
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
        ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
                              sizeof(struct ldlm_lock), 0,
                              CFS_SLAB_HWCACHE_ALIGN | CFS_SLAB_DESTROY_BY_RCU);
        if (ldlm_lock_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        ldlm_interval_slab = cfs_mem_cache_create("interval_node",
                                        sizeof(struct ldlm_interval),
                                        0, CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_interval_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                cfs_mem_cache_destroy(ldlm_lock_slab);
                return -ENOMEM;
        }
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        class_export_dump_hook = ldlm_dump_export_locks;
#endif
        return 0;
}
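
/* Module teardown: release the slab caches created by ldlm_init(). */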
void ldlm_exit(void)
{
        int rc;

        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        rc = cfs_mem_cache_destroy(ldlm_resource_slab);
        LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
#ifdef __KERNEL__
        /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so call
         * synchronize_rcu() to wait for a grace period to elapse and give
         * ldlm_lock_free() a chance to run. */
        synchronize_rcu();
#endif
        rc = cfs_mem_cache_destroy(ldlm_lock_slab);
        LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
        rc = cfs_mem_cache_destroy(ldlm_interval_slab);
        LASSERTF(rc == 0, "couldn't free interval node slab\n");
}

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_addref_try);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
EXPORT_SYMBOL(ldlm_lock_allow_match);
EXPORT_SYMBOL(ldlm_lock_downgrade);
EXPORT_SYMBOL(ldlm_lock_convert);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast_async);
EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_blocking_ast);
EXPORT_SYMBOL(ldlm_glimpse_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
EXPORT_SYMBOL(ldlm_prep_elc_req);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
EXPORT_SYMBOL(ldlm_cli_enqueue_local);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
EXPORT_SYMBOL(ldlm_cli_cancel_req);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_resource_iterate);
EXPORT_SYMBOL(ldlm_cancel_resource_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_enqueue0);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_request_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_handle_convert0);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);
EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
EXPORT_SYMBOL(ldlm_revoke_export_locks);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_import_add_conn);
EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(server_disconnect_export);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_pack_pool_reply);
EXPORT_SYMBOL(target_handle_disconnect);

/* l_lock.c */
EXPORT_SYMBOL(lock_res_and_lock);
EXPORT_SYMBOL(unlock_res_and_lock);