1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002-2004 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of the Lustre file system, http://www.lustre.org
9 * Lustre is a trademark of Cluster File Systems, Inc.
11 * You may have signed or agreed to another license before downloading
12 * this software. If so, you are bound by the terms and conditions
13 * of that agreement, and the following does not apply to you. See the
14 * LICENSE file included with this distribution for more information.
16 * If you did not agree to a different license, then this copy of Lustre
17 * is open source software; you can redistribute it and/or modify it
18 * under the terms of version 2 of the GNU General Public License as
19 * published by the Free Software Foundation.
21 * In either case, Lustre is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * license text for more details.
28 # define EXPORT_SYMTAB
30 #define DEBUG_SUBSYSTEM S_LDLM
33 # include <libcfs/libcfs.h>
35 # include <liblustre.h>
38 #include <lustre_dlm.h>
39 #include <obd_class.h>
40 #include <libcfs/list.h>
41 #include "ldlm_internal.h"
43 extern cfs_mem_cache_t *ldlm_resource_slab;
44 extern cfs_mem_cache_t *ldlm_lock_slab;
45 static struct semaphore ldlm_ref_sem;
46 static int ldlm_refcount;
50 static struct ldlm_state *ldlm_state;
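/* Descriptive note (added): round an absolute timeout (in jiffies) up to the
 * next whole second and return it again in jiffies; used to coalesce
 * waiting-locks timer deadlines. */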
52 inline cfs_time_t round_timeout(cfs_time_t timeout)
54 return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
57 /* timeout for initial callback (AST) reply */
58 static inline unsigned int ldlm_get_rq_timeout(unsigned int ldlm_timeout,
59 unsigned int obd_timeout)
61 unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
63 return timeout < 1 ? 1 : timeout;
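/* Purely illustrative arithmetic (these values are not taken from this file):
 * with ldlm_timeout = 20s and obd_timeout = 100s the AST reply timeout
 * becomes min(20, 100 / 3) = 20s; the result is clamped to at least 1s. */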
67 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
68 static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
69 static struct list_head waiting_locks_list;
70 static cfs_timer_t waiting_locks_timer;
72 static struct expired_lock_thread {
73 cfs_waitq_t elt_waitq;
76 struct list_head elt_expired_locks;
77 } expired_lock_thread;
82 #define ELT_TERMINATE 2
86 struct list_head blp_list;
87 cfs_waitq_t blp_waitq;
88 atomic_t blp_num_threads;
89 struct completion blp_comp;
92 struct ldlm_bl_work_item {
93 struct list_head blwi_entry;
94 struct ldlm_namespace *blwi_ns;
95 struct ldlm_lock_desc blwi_ld;
96 struct ldlm_lock *blwi_lock;
97 struct list_head blwi_head;
103 static inline int have_expired_locks(void)
108 spin_lock_bh(&waiting_locks_spinlock);
109 need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
110 spin_unlock_bh(&waiting_locks_spinlock);
115 static int expired_lock_main(void *arg)
117 struct list_head *expired = &expired_lock_thread.elt_expired_locks;
118 struct l_wait_info lwi = { 0 };
122 cfs_daemonize("ldlm_elt");
124 expired_lock_thread.elt_state = ELT_READY;
125 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
128 l_wait_event(expired_lock_thread.elt_waitq,
129 have_expired_locks() ||
130 expired_lock_thread.elt_state == ELT_TERMINATE,
133 spin_lock_bh(&waiting_locks_spinlock);
134 if (expired_lock_thread.elt_dump) {
135 spin_unlock_bh(&waiting_locks_spinlock);
137 /* from waiting_locks_callback, but not in timer */
138 libcfs_debug_dumplog();
139 libcfs_run_lbug_upcall(__FILE__,
140 "waiting_locks_callback",
141 expired_lock_thread.elt_dump);
143 spin_lock_bh(&waiting_locks_spinlock);
144 expired_lock_thread.elt_dump = 0;
149 while (!list_empty(expired)) {
150 struct obd_export *export;
151 struct ldlm_lock *lock;
153 lock = list_entry(expired->next, struct ldlm_lock,
155 if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
156 (void *)lock >= LP_POISON) {
157 spin_unlock_bh(&waiting_locks_spinlock);
158 CERROR("free lock on elt list %p\n", lock);
161 list_del_init(&lock->l_pending_chain);
162 if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
163 (void *)lock->l_export >= LP_POISON) {
164 CERROR("lock with free export on elt list %p\n",
166 lock->l_export = NULL;
167 LDLM_ERROR(lock, "free export");
170 export = class_export_get(lock->l_export);
171 spin_unlock_bh(&waiting_locks_spinlock);
174 class_fail_export(export);
175 class_export_put(export);
176 spin_lock_bh(&waiting_locks_spinlock);
178 spin_unlock_bh(&waiting_locks_spinlock);
180 if (do_dump && obd_dump_on_eviction) {
181 CERROR("dump the log upon eviction\n");
182 libcfs_debug_dumplog();
185 if (expired_lock_thread.elt_state == ELT_TERMINATE)
189 expired_lock_thread.elt_state = ELT_STOPPED;
190 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
194 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
196 /* This is called from within a timer interrupt and cannot schedule */
197 static void waiting_locks_callback(unsigned long unused)
199 struct ldlm_lock *lock, *last = NULL;
202 spin_lock_bh(&waiting_locks_spinlock);
203 while (!list_empty(&waiting_locks_list)) {
204 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
207 if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
208 (lock->l_req_mode == LCK_GROUP))
211 if (ptlrpc_check_suspend()) {
212 /* There is a case where we talk to one MDS while holding
213 * a lock from another MDS. If that second MDS is being
214 * recovered we can easily end up here, so we
215 * suspend timeouts. bug 6019 */
217 LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
218 lock->l_export->exp_client_uuid.uuid,
219 lock->l_export->exp_connection->c_remote_uuid.uuid,
220 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
222 list_del_init(&lock->l_pending_chain);
223 spin_unlock_bh(&waiting_locks_spinlock);
224 ldlm_add_waiting_lock(lock);
228 /* if the timeout overlaps the activation time of suspended timeouts
229 * then extend it to give the client a chance to reconnect */
230 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
231 cfs_time_seconds(obd_timeout)/2),
232 ptlrpc_suspend_wakeup_time())) {
233 LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
234 lock->l_export->exp_client_uuid.uuid,
235 lock->l_export->exp_connection->c_remote_uuid.uuid,
236 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
238 list_del_init(&lock->l_pending_chain);
239 spin_unlock_bh(&waiting_locks_spinlock);
240 ldlm_add_waiting_lock(lock);
244 LDLM_ERROR(lock, "lock callback timer expired: evicting client "
246 lock->l_export->exp_client_uuid.uuid,
247 lock->l_export->exp_connection->c_remote_uuid.uuid,
248 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
252 list_del(&lock->l_pending_chain);
253 list_add(&lock->l_pending_chain,
254 &expired_lock_thread.elt_expired_locks);
257 if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
258 if (obd_dump_on_timeout)
259 expired_lock_thread.elt_dump = __LINE__;
261 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
265 * Make sure the timer will fire again if we have any locks
268 if (!list_empty(&waiting_locks_list)) {
269 cfs_time_t timeout_rounded;
270 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
272 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
273 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
275 spin_unlock_bh(&waiting_locks_spinlock);
279 * Indicate that we're waiting for a client to call us back cancelling a given
280 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
281 * timer to fire appropriately. (We round up to the next second, to avoid
282 * floods of timer firings during periods of high lock contention and traffic).
284 * Called with the namespace lock held.
286 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock)
288 cfs_time_t timeout_rounded;
290 if (!list_empty(&lock->l_pending_chain))
293 lock->l_callback_timeout =cfs_time_add(cfs_time_current(),
294 cfs_time_seconds(obd_timeout)/2);
296 timeout_rounded = round_timeout(lock->l_callback_timeout);
298 if (cfs_time_before(timeout_rounded, cfs_timer_deadline(&waiting_locks_timer)) ||
299 !cfs_timer_is_armed(&waiting_locks_timer)) {
300 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
303 list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
307 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
311 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
313 spin_lock_bh(&waiting_locks_spinlock);
314 if (lock->l_destroyed) {
315 static cfs_time_t next;
316 spin_unlock_bh(&waiting_locks_spinlock);
317 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
318 if (cfs_time_after(cfs_time_current(), next)) {
319 next = cfs_time_shift(14400);
320 libcfs_debug_dumpstack(NULL);
325 ret = __ldlm_add_waiting_lock(lock);
326 spin_unlock_bh(&waiting_locks_spinlock);
328 LDLM_DEBUG(lock, "%sadding to wait list",
329 ret == 0 ? "not re-" : "");
334 * Remove a lock from the pending list, likely because it had its cancellation
335 * callback arrive without incident. This adjusts the lock-timeout timer if
336 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
338 * Called with namespace lock held.
340 int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
342 struct list_head *list_next;
344 if (list_empty(&lock->l_pending_chain))
347 list_next = lock->l_pending_chain.next;
348 if (lock->l_pending_chain.prev == &waiting_locks_list) {
349 /* Removing the head of the list, adjust timer. */
350 if (list_next == &waiting_locks_list) {
351 /* No more, just cancel. */
352 cfs_timer_disarm(&waiting_locks_timer);
354 struct ldlm_lock *next;
355 next = list_entry(list_next, struct ldlm_lock,
357 cfs_timer_arm(&waiting_locks_timer,
358 round_timeout(next->l_callback_timeout));
361 list_del_init(&lock->l_pending_chain);
366 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
370 if (lock->l_export == NULL) {
371 /* We don't have a "waiting locks list" on clients. */
372 LDLM_DEBUG(lock, "client lock: no-op");
376 spin_lock_bh(&waiting_locks_spinlock);
377 ret = __ldlm_del_waiting_lock(lock);
378 spin_unlock_bh(&waiting_locks_spinlock);
380 LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
387 * Called with namespace lock held.
389 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
391 if (lock->l_export == NULL) {
392 /* We don't have a "waiting locks list" on clients. */
393 LDLM_DEBUG(lock, "client lock: no-op");
397 spin_lock_bh(&waiting_locks_spinlock);
399 if (list_empty(&lock->l_pending_chain)) {
400 spin_unlock_bh(&waiting_locks_spinlock);
401 LDLM_DEBUG(lock, "wasn't waiting");
405 __ldlm_del_waiting_lock(lock);
406 __ldlm_add_waiting_lock(lock);
407 spin_unlock_bh(&waiting_locks_spinlock);
409 LDLM_DEBUG(lock, "refreshed");
413 #else /* !__KERNEL__ */
415 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
417 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
421 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
426 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
430 #endif /* __KERNEL__ */
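/* A client failed to respond to an AST in time: log the eviction on the
 * console, optionally dump the debug log, and fail the client's export. */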
432 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
433 const char *ast_type)
435 struct ptlrpc_connection *conn = lock->l_export->exp_connection;
436 char *str = libcfs_nid2str(conn->c_peer.nid);
438 LCONSOLE_ERROR_MSG(0x138, "A client on nid %s was evicted from "
439 "service %s.\n", str,
440 lock->l_export->exp_obd->obd_name);
442 LCONSOLE_ERROR_MSG(0x012, "Lock %s callback to %s timed out for "
443 "resource %d\n", ast_type,
444 obd_export_nid2str(lock->l_export), rc);
446 if (obd_dump_on_timeout)
447 libcfs_debug_dumplog();
448 class_fail_export(lock->l_export);
451 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
452 struct ptlrpc_request *req, int rc,
453 const char *ast_type)
455 lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
457 if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
458 LASSERT(lock->l_export);
459 if (lock->l_export->exp_libclient) {
460 LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
461 " timeout, just cancelling lock", ast_type,
462 libcfs_nid2str(peer.nid));
463 ldlm_lock_cancel(lock);
465 } else if (lock->l_flags & LDLM_FL_CANCEL) {
466 LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
467 "cancel was received (AST reply lost?)",
468 ast_type, libcfs_nid2str(peer.nid));
469 ldlm_lock_cancel(lock);
472 ldlm_del_waiting_lock(lock);
473 ldlm_failed_ast(lock, rc, ast_type);
477 LDLM_DEBUG(lock, "client (nid %s) returned %d"
478 " from %s AST - normal race",
479 libcfs_nid2str(peer.nid),
481 lustre_msg_get_status(req->rq_repmsg) : -1,
484 LDLM_ERROR(lock, "client (nid %s) returned %d "
485 "from %s AST", libcfs_nid2str(peer.nid),
486 (req->rq_repmsg != NULL) ?
487 lustre_msg_get_status(req->rq_repmsg) : 0,
489 ldlm_lock_cancel(lock);
490 /* Server-side AST functions are called from ldlm_reprocess_all,
491 * which needs to be told to please restart its reprocessing. */
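/* Asynchronous interpret callback for blocking/completion AST RPCs: refresh
 * the LVB if the client raced us with a cancel, and hand any error to
 * ldlm_handle_ast_error(). */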
498 static int ldlm_cb_interpret(struct ptlrpc_request *req, void *data, int rc)
500 struct ldlm_cb_set_arg *arg;
501 struct ldlm_lock *lock;
504 LASSERT(data != NULL);
506 arg = req->rq_async_args.pointer_arg[0];
507 lock = req->rq_async_args.pointer_arg[1];
508 LASSERT(lock != NULL);
510 /* If client canceled the lock but the cancel has not
511 * been received yet, we need to update lvbo to have the
512 * proper attributes cached. */
513 if (rc == -EINVAL && arg->type == LDLM_BL_CALLBACK)
514 ldlm_res_lvbo_update(lock->l_resource, NULL,
516 rc = ldlm_handle_ast_error(lock, req, rc,
517 arg->type == LDLM_BL_CALLBACK
518 ? "blocking" : "completion");
524 atomic_set(&arg->restart, 1);
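/* Common tail of the blocking/completion AST senders: for instant cancel
 * send the RPC directly and flag a reprocess restart, otherwise add the
 * request to the caller's RPC set. */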
529 static inline int ldlm_bl_and_cp_ast_fini(struct ptlrpc_request *req,
530 struct ldlm_cb_set_arg *arg,
531 struct ldlm_lock *lock,
537 if (unlikely(instant_cancel)) {
538 rc = ptl_send_rpc(req, 1);
539 ptlrpc_req_finished(req);
541 /* If we cancelled the lock, we need to restart
542 * ldlm_reprocess_queue */
543 atomic_set(&arg->restart, 1);
546 ptlrpc_set_add_req(arg->set, req);
553 * ->l_blocking_ast() method for server-side locks. This is invoked when a newly
554 * enqueued server lock conflicts with the given one.
556 * Sends a blocking AST RPC to the client owning that lock and arms the timeout
557 * timer to wait for the client's response.
559 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
560 struct ldlm_lock_desc *desc,
561 void *data, int flag)
563 struct ldlm_cb_set_arg *arg = (struct ldlm_cb_set_arg *)data;
564 struct ldlm_request *body;
565 struct ptlrpc_request *req;
566 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
567 [DLM_LOCKREQ_OFF] = sizeof(*body) };
568 int instant_cancel = 0, rc;
571 if (flag == LDLM_CB_CANCELING) {
572 /* Don't need to do anything here. */
577 LASSERT(data != NULL);
578 if (lock->l_export->exp_obd->obd_recovering != 0) {
579 LDLM_ERROR(lock, "BUG 6063: lock collision during recovery");
580 ldlm_lock_dump(D_ERROR, lock, 0);
583 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
584 LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK, 2, size,
589 req->rq_async_args.pointer_arg[0] = arg;
590 req->rq_async_args.pointer_arg[1] = lock;
591 req->rq_interpret_reply = ldlm_cb_interpret;
592 req->rq_no_resend = 1;
594 lock_res(lock->l_resource);
595 if (lock->l_granted_mode != lock->l_req_mode) {
596 /* this blocking AST will be communicated as part of the
597 * completion AST instead */
598 unlock_res(lock->l_resource);
599 ptlrpc_req_finished(req);
600 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
604 if (lock->l_destroyed) {
605 /* What's the point? */
606 unlock_res(lock->l_resource);
607 ptlrpc_req_finished(req);
611 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
614 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
615 body->lock_handle[0] = lock->l_remote_handle;
616 body->lock_desc = *desc;
617 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
619 LDLM_DEBUG(lock, "server preparing blocking AST");
621 ptlrpc_req_set_repsize(req, 1, NULL);
622 if (instant_cancel) {
623 unlock_res(lock->l_resource);
624 ldlm_lock_cancel(lock);
626 LASSERT(lock->l_granted_mode == lock->l_req_mode);
627 ldlm_add_waiting_lock(lock);
628 unlock_res(lock->l_resource);
631 req->rq_send_state = LUSTRE_IMP_FULL;
632 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
634 if (lock->l_export && lock->l_export->exp_ldlm_stats)
635 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
636 LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
638 rc = ldlm_bl_and_cp_ast_fini(req, arg, lock, instant_cancel);
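/* ->l_completion_ast() for server-side locks: tell the client its enqueue
 * has been granted, including the updated lock descriptor, the resource's
 * LVB if it has one, and LDLM_FL_AST_SENT if a blocking AST is already
 * pending. */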
643 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
645 struct ldlm_cb_set_arg *arg = (struct ldlm_cb_set_arg *)data;
646 struct ldlm_request *body;
647 struct ptlrpc_request *req;
648 struct timeval granted_time;
649 long total_enqueue_wait;
650 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
651 [DLM_LOCKREQ_OFF] = sizeof(*body) };
652 int rc, buffers = 2, instant_cancel = 0;
655 LASSERT(lock != NULL);
656 LASSERT(data != NULL);
658 do_gettimeofday(&granted_time);
659 total_enqueue_wait = cfs_timeval_sub(&granted_time,
660 &lock->l_enqueued_time, NULL);
662 if (total_enqueue_wait / 1000000 > obd_timeout)
663 LDLM_ERROR(lock, "enqueue wait took %luus from %lu",
664 total_enqueue_wait, lock->l_enqueued_time.tv_sec);
666 lock_res_and_lock(lock);
667 if (lock->l_resource->lr_lvb_len) {
668 size[DLM_REQ_REC_OFF] = lock->l_resource->lr_lvb_len;
671 unlock_res_and_lock(lock);
673 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
674 LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK, buffers,
679 req->rq_async_args.pointer_arg[0] = arg;
680 req->rq_async_args.pointer_arg[1] = lock;
681 req->rq_interpret_reply = ldlm_cb_interpret;
682 req->rq_no_resend = 1;
684 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
685 body->lock_handle[0] = lock->l_remote_handle;
686 body->lock_flags = flags;
687 ldlm_lock2desc(lock, &body->lock_desc);
692 lvb = lustre_msg_buf(req->rq_reqmsg, DLM_REQ_REC_OFF,
693 lock->l_resource->lr_lvb_len);
694 lock_res_and_lock(lock);
695 memcpy(lvb, lock->l_resource->lr_lvb_data,
696 lock->l_resource->lr_lvb_len);
697 unlock_res_and_lock(lock);
700 LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
703 ptlrpc_req_set_repsize(req, 1, NULL);
705 req->rq_send_state = LUSTRE_IMP_FULL;
706 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
708 /* We only send real blocking ASTs after the lock is granted */
709 lock_res_and_lock(lock);
710 if (lock->l_flags & LDLM_FL_AST_SENT) {
711 body->lock_flags |= LDLM_FL_AST_SENT;
713 /* We might get here before ldlm_handle_enqueue has set the
714 * LDLM_FL_CANCEL_ON_BLOCK flag, in which case this lock is put
715 * on the waiting list. That is safe: similar code in
716 * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
717 * which not only cancels the lock but also removes it
718 * from the waiting list. */
719 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
720 unlock_res_and_lock(lock);
721 ldlm_lock_cancel(lock);
723 lock_res_and_lock(lock);
725 /* start the lock-timeout clock */
726 ldlm_add_waiting_lock(lock);
729 unlock_res_and_lock(lock);
731 if (lock->l_export && lock->l_export->exp_ldlm_stats)
732 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
733 LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
735 rc = ldlm_bl_and_cp_ast_fini(req, arg, lock, instant_cancel);
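/* ->l_glimpse_ast() for server-side locks: synchronously ask the client
 * holding the lock for its current LVB and update the resource's LVB from
 * the reply. */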
740 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
742 struct ldlm_resource *res = lock->l_resource;
743 struct ldlm_request *body;
744 struct ptlrpc_request *req;
745 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
746 [DLM_LOCKREQ_OFF] = sizeof(*body) };
750 LASSERT(lock != NULL);
752 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
753 LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK, 2, size,
758 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
759 body->lock_handle[0] = lock->l_remote_handle;
760 ldlm_lock2desc(lock, &body->lock_desc);
762 lock_res_and_lock(lock);
763 size[REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
764 unlock_res_and_lock(lock);
765 res = lock->l_resource;
766 ptlrpc_req_set_repsize(req, 2, size);
768 req->rq_send_state = LUSTRE_IMP_FULL;
769 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
771 if (lock->l_export && lock->l_export->exp_ldlm_stats)
772 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
773 LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
775 rc = ptlrpc_queue_wait(req);
776 if (rc == -ELDLM_NO_LOCK_DATA)
777 LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
779 rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
781 rc = ldlm_res_lvbo_update(res, req->rq_repmsg,
783 ptlrpc_req_finished(req);
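/* Search the export's list of held locks for one whose remote handle matches
 * the client's cookie; used to find the original lock when an enqueue is
 * replayed. */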
787 static struct ldlm_lock *
788 find_existing_lock(struct obd_export *exp,
789 const struct lustre_handle *remote_hdl)
791 struct list_head *iter;
793 spin_lock(&exp->exp_ldlm_data.led_lock);
794 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
795 struct ldlm_lock *lock;
796 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
797 if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
799 spin_unlock(&exp->exp_ldlm_data.led_lock);
803 spin_unlock(&exp->exp_ldlm_data.led_lock);
808 extern unsigned long long lu_time_stamp_get(void);
810 #define lu_time_stamp_get() time(NULL)
814 * Main server-side entry point into LDLM. This is called by ptlrpc service
815 * threads to carry out client lock enqueueing requests.
817 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
818 struct ptlrpc_request *req,
819 const struct ldlm_request *dlm_req,
820 const struct ldlm_callback_suite *cbs)
822 struct ldlm_reply *dlm_rep;
823 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
824 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
827 ldlm_error_t err = ELDLM_OK;
828 struct ldlm_lock *lock = NULL;
832 LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
834 ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
835 flags = dlm_req->lock_flags;
837 LASSERT(req->rq_export);
839 if (req->rq_export->exp_ldlm_stats)
840 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
841 LDLM_ENQUEUE - LDLM_FIRST_OPC);
843 if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
844 dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
845 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
846 dlm_req->lock_desc.l_resource.lr_type);
847 GOTO(out, rc = -EFAULT);
850 if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
851 dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
852 dlm_req->lock_desc.l_req_mode &
853 (dlm_req->lock_desc.l_req_mode-1))) {
854 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
855 dlm_req->lock_desc.l_req_mode);
856 GOTO(out, rc = -EFAULT);
859 if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
860 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
862 DEBUG_REQ(D_ERROR, req,
863 "PLAIN lock request from IBITS client?");
864 GOTO(out, rc = -EPROTO);
866 } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
868 DEBUG_REQ(D_ERROR, req,
869 "IBITS lock request from unaware client?");
870 GOTO(out, rc = -EPROTO);
874 /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
875 against server's _CONNECT_SUPPORTED flags? (I don't want to use
876 ibits for mgc/mgs) */
878 /* INODEBITS_INTEROP: Perform conversion from plain lock to
879 * inodebits lock if client does not support them. */
880 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
881 (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
882 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
883 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
884 MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
885 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
886 dlm_req->lock_desc.l_req_mode = LCK_CR;
890 if (unlikely(flags & LDLM_FL_REPLAY)) {
891 lock = find_existing_lock(req->rq_export,
892 &dlm_req->lock_handle[0]);
894 DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
895 LPX64, lock->l_handle.h_cookie);
896 GOTO(existing_lock, rc = 0);
900 /* The lock's callback data might be set in the policy function */
901 lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
902 dlm_req->lock_desc.l_resource.lr_type,
903 dlm_req->lock_desc.l_req_mode,
904 cbs->lcs_blocking, cbs->lcs_completion,
905 cbs->lcs_glimpse, NULL, 0);
908 GOTO(out, rc = -ENOMEM);
910 do_gettimeofday(&lock->l_enqueued_time);
911 lock->l_remote_handle = dlm_req->lock_handle[0];
912 LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
914 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
915 /* Don't enqueue a lock onto the export if it has already
916 * been evicted. Cancel it now instead. (bug 3822) */
917 if (req->rq_export->exp_failed) {
918 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
919 GOTO(out, rc = -ENOTCONN);
921 lock->l_export = class_export_get(req->rq_export);
922 spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
923 list_add(&lock->l_export_chain,
924 &lock->l_export->exp_ldlm_data.led_held_locks);
925 spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
929 if (flags & LDLM_FL_HAS_INTENT) {
930 /* In this case, the reply buffer is allocated deep in
931 * local_lock_enqueue by the policy function. */
936 lock_res_and_lock(lock);
937 if (lock->l_resource->lr_lvb_len) {
938 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
941 unlock_res_and_lock(lock);
943 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
944 GOTO(out, rc = -ENOMEM);
946 rc = lustre_pack_reply(req, buffers, size, NULL);
951 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
952 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
953 if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
954 lock->l_req_extent = lock->l_policy_data.l_extent;
956 err = ldlm_lock_enqueue(ns, &lock, cookie, &flags);
960 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
962 dlm_rep->lock_flags = flags;
964 ldlm_lock2desc(lock, &dlm_rep->lock_desc);
965 ldlm_lock2handle(lock, &dlm_rep->lock_handle);
967 /* We never send a blocking AST until the lock is granted, but
968 * we can tell it right now */
969 lock_res_and_lock(lock);
971 /* Now take into account the flags to be inherited from the original lock
972 request, both in the reply to the client and in our own lock flags. */
973 dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
974 lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
976 /* Don't move a pending lock onto the export if it has already
977 * been evicted. Cancel it now instead. (bug 5683) */
978 if (unlikely(req->rq_export->exp_failed ||
979 OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
980 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
982 } else if (lock->l_flags & LDLM_FL_AST_SENT) {
983 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
984 if (lock->l_granted_mode == lock->l_req_mode) {
986 * Only cancel the lock if it was granted, because it would
987 * be destroyed immediately and would never be granted
988 * in the future, causing timeouts on the client. A lock
989 * that was not granted will be cancelled immediately after
990 * sending the completion AST.
992 if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
993 unlock_res_and_lock(lock);
994 ldlm_lock_cancel(lock);
995 lock_res_and_lock(lock);
997 ldlm_add_waiting_lock(lock);
1000 /* Make sure we never ever grant usual metadata locks to liblustre
1002 if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
1003 dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
1004 req->rq_export->exp_libclient) {
1005 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
1006 !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
1007 CERROR("Granting sync lock to libclient. "
1008 "req fl %d, rep fl %d, lock fl %d\n",
1009 dlm_req->lock_flags, dlm_rep->lock_flags,
1011 LDLM_ERROR(lock, "sync lock");
1012 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
1013 struct ldlm_intent *it;
1014 it = lustre_msg_buf(req->rq_reqmsg,
1018 CERROR("This is intent %s ("LPU64")\n",
1019 ldlm_it2str(it->opc), it->opc);
1025 unlock_res_and_lock(lock);
1029 req->rq_status = err;
1030 if (req->rq_reply_state == NULL) {
1031 err = lustre_pack_reply(req, 1, NULL, NULL);
1034 req->rq_status = rc;
1037 /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1038 * ldlm_reprocess_all. If this moves, revisit that code. -phil */
1040 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
1041 "(err=%d, rc=%d)", err, rc);
1043 lock_res_and_lock(lock);
1045 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
1046 if (size[DLM_REPLY_REC_OFF] > 0) {
1047 void *lvb = lustre_msg_buf(req->rq_repmsg,
1049 size[DLM_REPLY_REC_OFF]);
1050 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1053 memcpy(lvb, lock->l_resource->lr_lvb_data,
1054 size[DLM_REPLY_REC_OFF]);
1057 ldlm_resource_unlink_lock(lock);
1058 ldlm_lock_destroy_nolock(lock);
1060 unlock_res_and_lock(lock);
1062 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1063 ldlm_reprocess_all(lock->l_resource);
1065 LDLM_LOCK_PUT(lock);
1068 LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1074 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1075 ldlm_completion_callback completion_callback,
1076 ldlm_blocking_callback blocking_callback,
1077 ldlm_glimpse_callback glimpse_callback)
1080 struct ldlm_request *dlm_req;
1081 struct ldlm_callback_suite cbs = {
1082 .lcs_completion = completion_callback,
1083 .lcs_blocking = blocking_callback,
1084 .lcs_glimpse = glimpse_callback
1088 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1089 sizeof *dlm_req, lustre_swab_ldlm_request);
1090 if (dlm_req != NULL) {
1091 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1092 req, dlm_req, &cbs);
1094 CERROR ("Can't unpack dlm_req\n");
1100 int ldlm_handle_convert0(struct ptlrpc_request *req,
1101 const struct ldlm_request *dlm_req)
1103 struct ldlm_reply *dlm_rep;
1104 struct ldlm_lock *lock;
1106 int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1107 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
1110 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1111 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1112 LDLM_CONVERT - LDLM_FIRST_OPC);
1114 rc = lustre_pack_reply(req, 2, size, NULL);
1116 CERROR("out of memory\n");
1119 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
1121 dlm_rep->lock_flags = dlm_req->lock_flags;
1123 lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1125 req->rq_status = EINVAL;
1129 LDLM_DEBUG(lock, "server-side convert handler START");
1131 do_gettimeofday(&lock->l_enqueued_time);
1132 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1133 &dlm_rep->lock_flags);
1135 if (ldlm_del_waiting_lock(lock))
1136 LDLM_DEBUG(lock, "converted waiting lock");
1139 req->rq_status = EDEADLOCK;
1144 if (!req->rq_status)
1145 ldlm_reprocess_all(lock->l_resource);
1146 LDLM_DEBUG(lock, "server-side convert handler END");
1147 LDLM_LOCK_PUT(lock);
1149 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1154 int ldlm_handle_convert(struct ptlrpc_request *req)
1157 struct ldlm_request *dlm_req;
1159 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof *dlm_req,
1160 lustre_swab_ldlm_request);
1161 if (dlm_req != NULL) {
1162 rc = ldlm_handle_convert0(req, dlm_req);
1164 CERROR ("Can't unpack dlm_req\n");
1170 /* Cancel all the locks whose handles are packed into the ldlm_request */
1171 int ldlm_request_cancel(struct ptlrpc_request *req,
1172 const struct ldlm_request *dlm_req, int first)
1174 struct ldlm_resource *res, *pres = NULL;
1175 struct ldlm_lock *lock;
1176 int i, count, done = 0;
1179 count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1183 /* There are no locks on the server at replay time, so
1184 * skip lock cancellation to let the replay tests pass. */
1185 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1188 LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1189 "starting at %d", count, first);
1191 for (i = first; i < count; i++) {
1192 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1194 LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1195 "lock (cookie "LPU64")",
1196 dlm_req->lock_handle[i].cookie);
1200 res = lock->l_resource;
1205 ldlm_reprocess_all(pres);
1206 ldlm_resource_putref(pres);
1209 ldlm_resource_getref(res);
1210 ldlm_res_lvbo_update(res, NULL, 0, 1);
1214 ldlm_lock_cancel(lock);
1215 LDLM_LOCK_PUT(lock);
1218 ldlm_reprocess_all(pres);
1219 ldlm_resource_putref(pres);
1221 LDLM_DEBUG_NOLOCK("server-side cancel handler END");
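/* Server-side entry point for LDLM_CANCEL RPCs: unpack the request, cancel
 * every lock it lists, and send the reply. */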
1225 int ldlm_handle_cancel(struct ptlrpc_request *req)
1227 struct ldlm_request *dlm_req;
1231 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1232 lustre_swab_ldlm_request);
1233 if (dlm_req == NULL) {
1234 CERROR("bad request buffer for cancel\n");
1238 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1239 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1240 LDLM_CANCEL - LDLM_FIRST_OPC);
1242 rc = lustre_pack_reply(req, 1, NULL, NULL);
1244 CERROR("out of memory\n");
1248 if (!ldlm_request_cancel(req, dlm_req, 0))
1249 req->rq_status = ESTALE;
1251 if (ptlrpc_reply(req) != 0)
1257 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1258 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1263 LDLM_DEBUG(lock, "client blocking AST callback handler START");
1265 lock_res_and_lock(lock);
1266 lock->l_flags |= LDLM_FL_CBPENDING;
1268 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1269 lock->l_flags |= LDLM_FL_CANCEL;
1271 do_ast = (!lock->l_readers && !lock->l_writers);
1272 unlock_res_and_lock(lock);
1275 LDLM_DEBUG(lock, "already unused, calling "
1276 "callback (%p)", lock->l_blocking_ast);
1277 if (lock->l_blocking_ast != NULL)
1278 lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1281 LDLM_DEBUG(lock, "Lock still has references, will be"
1282 " cancelled later");
1285 LDLM_DEBUG(lock, "client blocking callback handler END");
1286 LDLM_LOCK_PUT(lock);
1290 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1291 struct ldlm_namespace *ns,
1292 struct ldlm_request *dlm_req,
1293 struct ldlm_lock *lock)
1295 CFS_LIST_HEAD(ast_list);
1298 LDLM_DEBUG(lock, "client completion callback handler START");
1300 lock_res_and_lock(lock);
1302 /* If we receive the completion AST before the actual enqueue returned,
1303 * then we might need to switch lock modes, resources, or extents. */
1304 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1305 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1306 LDLM_DEBUG(lock, "completion AST, new lock mode");
1309 if (lock->l_resource->lr_type != LDLM_PLAIN) {
1310 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
1311 LDLM_DEBUG(lock, "completion AST, new policy data");
1314 ldlm_resource_unlink_lock(lock);
1315 if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1316 &lock->l_resource->lr_name,
1317 sizeof(lock->l_resource->lr_name)) != 0) {
1318 unlock_res_and_lock(lock);
1319 ldlm_lock_change_resource(ns, lock,
1320 &dlm_req->lock_desc.l_resource.lr_name);
1321 LDLM_DEBUG(lock, "completion AST, new resource");
1322 CERROR("change resource!\n");
1323 lock_res_and_lock(lock);
1326 if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1327 lock->l_flags |= LDLM_FL_CBPENDING;
1328 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1331 if (lock->l_lvb_len) {
1333 lvb = lustre_swab_reqbuf(req, DLM_REQ_REC_OFF, lock->l_lvb_len,
1334 lock->l_lvb_swabber);
1336 LDLM_ERROR(lock, "completion AST did not contain "
1339 memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1343 ldlm_grant_lock(lock, &ast_list);
1344 unlock_res_and_lock(lock);
1346 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1348 ldlm_run_cp_ast_work(&ast_list);
1350 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1352 LDLM_LOCK_PUT(lock);
1356 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1357 struct ldlm_namespace *ns,
1358 struct ldlm_request *dlm_req,
1359 struct ldlm_lock *lock)
1364 LDLM_DEBUG(lock, "client glimpse AST callback handler");
1366 if (lock->l_glimpse_ast != NULL)
1367 rc = lock->l_glimpse_ast(lock, req);
1369 if (req->rq_repmsg != NULL) {
1372 req->rq_status = rc;
1376 lock_res_and_lock(lock);
1377 if (lock->l_granted_mode == LCK_PW &&
1378 !lock->l_readers && !lock->l_writers &&
1379 cfs_time_after(cfs_time_current(),
1380 cfs_time_add(lock->l_last_used,
1381 cfs_time_seconds(10)))) {
1382 unlock_res_and_lock(lock);
1383 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1384 ldlm_handle_bl_callback(ns, NULL, lock);
1389 unlock_res_and_lock(lock);
1390 LDLM_LOCK_PUT(lock);
1394 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1396 req->rq_status = rc;
1397 if (req->rq_reply_state == NULL) {
1398 rc = lustre_pack_reply(req, 1, NULL, NULL);
1402 return ptlrpc_reply(req);
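/* Queue work for the blocking-callback (ldlm_bl_NN) threads: either a single
 * blocked lock or a list of locks already cancelled locally, then wake a
 * thread from the pool. */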
1406 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1407 struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1408 struct list_head *cancels, int count)
1410 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1411 struct ldlm_bl_work_item *blwi;
1414 if (cancels && count == 0)
1417 OBD_ALLOC(blwi, sizeof(*blwi));
1423 blwi->blwi_ld = *ld;
1425 list_add(&blwi->blwi_head, cancels);
1426 list_del_init(cancels);
1427 blwi->blwi_count = count;
1429 blwi->blwi_lock = lock;
1431 spin_lock(&blp->blp_lock);
1432 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1433 cfs_waitq_signal(&blp->blp_waitq);
1434 spin_unlock(&blp->blp_lock);
1440 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1441 struct ldlm_lock *lock)
1444 RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0));
1450 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1451 struct list_head *cancels, int count)
1454 RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
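/* Request handler for the ldlm_cbd service: dispatches blocking, completion
 * and glimpse ASTs arriving from servers, plus the llog and quota callbacks
 * that share this portal. */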
1460 static int ldlm_callback_handler(struct ptlrpc_request *req)
1462 struct ldlm_namespace *ns;
1463 struct ldlm_request *dlm_req;
1464 struct ldlm_lock *lock;
1468 /* Requests arrive in sender's byte order. The ptlrpc service
1469 * handler has already checked and, if necessary, byte-swapped the
1470 * incoming request message body, but I am responsible for the
1471 * message buffers. */
1473 if (req->rq_export == NULL) {
1474 struct ldlm_request *dlm_req;
1476 CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
1477 "export cookie "LPX64"; this is "
1478 "normal if this node rebooted with a lock held\n",
1479 lustre_msg_get_opc(req->rq_reqmsg),
1480 libcfs_id2str(req->rq_peer),
1481 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1483 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1485 lustre_swab_ldlm_request);
1486 if (dlm_req != NULL)
1487 CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
1488 dlm_req->lock_handle[0].cookie);
1490 ldlm_callback_reply(req, -ENOTCONN);
1494 LASSERT(req->rq_export != NULL);
1495 LASSERT(req->rq_export->exp_obd != NULL);
1497 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1498 case LDLM_BL_CALLBACK:
1499 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1501 case LDLM_CP_CALLBACK:
1502 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
1504 case LDLM_GL_CALLBACK:
1505 OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
1507 case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1508 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1509 rc = llog_origin_handle_cancel(req);
1510 ldlm_callback_reply(req, rc);
1512 case OBD_QC_CALLBACK:
1513 OBD_FAIL_RETURN(OBD_FAIL_OBD_QC_CALLBACK_NET, 0);
1514 rc = target_handle_qc_callback(req);
1515 ldlm_callback_reply(req, rc);
1519 /* reply in handler */
1520 rc = target_handle_dqacq_callback(req);
1522 case LLOG_ORIGIN_HANDLE_CREATE:
1523 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1524 rc = llog_origin_handle_create(req);
1525 ldlm_callback_reply(req, rc);
1527 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1528 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1529 rc = llog_origin_handle_next_block(req);
1530 ldlm_callback_reply(req, rc);
1532 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1533 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1534 rc = llog_origin_handle_read_header(req);
1535 ldlm_callback_reply(req, rc);
1537 case LLOG_ORIGIN_HANDLE_CLOSE:
1538 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1539 rc = llog_origin_handle_close(req);
1540 ldlm_callback_reply(req, rc);
1546 CERROR("unknown opcode %u\n",
1547 lustre_msg_get_opc(req->rq_reqmsg));
1548 ldlm_callback_reply(req, -EPROTO);
1552 ns = req->rq_export->exp_obd->obd_namespace;
1553 LASSERT(ns != NULL);
1555 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1556 lustre_swab_ldlm_request);
1557 if (dlm_req == NULL) {
1558 CERROR ("can't unpack dlm_req\n");
1559 ldlm_callback_reply(req, -EPROTO);
1563 lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle[0]);
1565 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
1566 "disappeared\n", dlm_req->lock_handle[0].cookie);
1567 ldlm_callback_reply(req, -EINVAL);
1571 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
1572 lock_res_and_lock(lock);
1573 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
1574 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
1575 /* If somebody cancels locks and the cache is already dropped,
1576 * we can tell the server we have no lock. Otherwise, we
1577 * should send cancel after dropping the cache. */
1578 if ((lock->l_flags & LDLM_FL_CANCELING) &&
1579 (lock->l_flags & LDLM_FL_BL_DONE)) {
1580 LDLM_DEBUG(lock, "callback on lock "
1581 LPX64" - lock disappeared\n",
1582 dlm_req->lock_handle[0].cookie);
1583 unlock_res_and_lock(lock);
1584 LDLM_LOCK_PUT(lock);
1585 ldlm_callback_reply(req, -EINVAL);
1588 lock->l_flags |= LDLM_FL_BL_AST;
1590 unlock_res_and_lock(lock);
1592 /* We want the ost thread to get this reply so that it can respond
1593 * to ost requests (write cache writeback) that might be triggered
1596 * But we'd also like to be able to indicate in the reply that we're
1597 * cancelling right now, because it's unused, or have an intent result
1598 * in the reply, so we might have to push the responsibility for sending
1599 * the reply down into the AST handlers, alas. */
1601 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1602 case LDLM_BL_CALLBACK:
1603 CDEBUG(D_INODE, "blocking ast\n");
1604 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK))
1605 ldlm_callback_reply(req, 0);
1606 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
1607 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
1609 case LDLM_CP_CALLBACK:
1610 CDEBUG(D_INODE, "completion ast\n");
1611 ldlm_callback_reply(req, 0);
1612 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
1614 case LDLM_GL_CALLBACK:
1615 CDEBUG(D_INODE, "glimpse ast\n");
1616 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
1619 LBUG(); /* checked above */
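/* Request handler for the ldlm_canceld service: handles lock cancel RPCs
 * from clients and, for compatibility, llog origin cancels. */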
1625 static int ldlm_cancel_handler(struct ptlrpc_request *req)
1630 /* Requests arrive in sender's byte order. The ptlrpc service
1631 * handler has already checked and, if necessary, byte-swapped the
1632 * incoming request message body, but I am responsible for the
1633 * message buffers. */
1635 if (req->rq_export == NULL) {
1636 struct ldlm_request *dlm_req;
1638 CERROR("operation %d from %s with bad export cookie "LPU64"\n",
1639 lustre_msg_get_opc(req->rq_reqmsg),
1640 libcfs_id2str(req->rq_peer),
1641 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1643 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1645 lustre_swab_ldlm_request);
1646 if (dlm_req != NULL)
1647 ldlm_lock_dump_handle(D_ERROR,
1648 &dlm_req->lock_handle[0]);
1649 ldlm_callback_reply(req, -ENOTCONN);
1653 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1655 /* XXX FIXME move this back to mds/handler.c, bug 249 */
1657 CDEBUG(D_INODE, "cancel\n");
1658 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
1659 rc = ldlm_handle_cancel(req);
1663 case OBD_LOG_CANCEL:
1664 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1665 rc = llog_origin_handle_cancel(req);
1666 ldlm_callback_reply(req, rc);
1669 CERROR("invalid opcode %d\n",
1670 lustre_msg_get_opc(req->rq_reqmsg));
1671 ldlm_callback_reply(req, -EINVAL);
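/* Force a client to drop its granted PLAIN/IBITS locks: for each such lock
 * on the export with no AST already in flight, set LDLM_FL_AST_SENT and
 * send a blocking AST that pretends an exclusive (LCK_EX) conflict. */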
1677 void ldlm_revoke_export_locks(struct obd_export *exp)
1679 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
1680 struct list_head rpc_list;
1681 struct ldlm_lock *lock, *next;
1682 struct ldlm_lock_desc desc;
1685 INIT_LIST_HEAD(&rpc_list);
1687 spin_lock(&exp->exp_ldlm_data.led_lock);
1688 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
1689 lock_res_and_lock(lock);
1691 if (lock->l_req_mode != lock->l_granted_mode) {
1692 unlock_res_and_lock(lock);
1696 LASSERT(lock->l_resource);
1697 if (lock->l_resource->lr_type != LDLM_IBITS &&
1698 lock->l_resource->lr_type != LDLM_PLAIN) {
1699 unlock_res_and_lock(lock);
1703 if (lock->l_flags & LDLM_FL_AST_SENT) {
1704 unlock_res_and_lock(lock);
1708 LASSERT(lock->l_blocking_ast);
1709 LASSERT(!lock->l_blocking_lock);
1711 lock->l_flags |= LDLM_FL_AST_SENT;
1712 list_move(&lock->l_export_chain, &rpc_list);
1714 unlock_res_and_lock(lock);
1716 spin_unlock(&exp->exp_ldlm_data.led_lock);
1718 while (!list_empty(&rpc_list)) {
1719 lock = list_entry(rpc_list.next, struct ldlm_lock,
1721 list_del_init(&lock->l_export_chain);
1723 /* the desc just pretends the lock is exclusive */
1724 ldlm_lock2desc(lock, &desc);
1725 desc.l_req_mode = LCK_EX;
1726 desc.l_granted_mode = 0;
1728 LDLM_LOCK_GET(lock);
1729 lock->l_blocking_ast(lock, &desc, lock->l_ast_data,
1731 LDLM_LOCK_PUT(lock);
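/* Pop the next blocking-callback work item off the pool's list, if any. */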
1737 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
1739 struct ldlm_bl_work_item *blwi = NULL;
1741 spin_lock(&blp->blp_lock);
1742 if (!list_empty(&blp->blp_list)) {
1743 blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
1745 list_del(&blwi->blwi_entry);
1747 spin_unlock(&blp->blp_lock);
1752 struct ldlm_bl_thread_data {
1754 struct ldlm_bl_pool *bltd_blp;
1757 static int ldlm_bl_thread_main(void *arg)
1759 struct ldlm_bl_thread_data *bltd = arg;
1760 struct ldlm_bl_pool *blp = bltd->bltd_blp;
1764 char name[CFS_CURPROC_COMM_MAX];
1765 snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
1767 cfs_daemonize(name);
1770 atomic_inc(&blp->blp_num_threads);
1771 complete(&blp->blp_comp);
1774 struct l_wait_info lwi = { 0 };
1775 struct ldlm_bl_work_item *blwi = NULL;
1777 l_wait_event_exclusive(blp->blp_waitq,
1778 (blwi = ldlm_bl_get_work(blp)) != NULL,
1781 if (blwi->blwi_ns == NULL)
1784 if (blwi->blwi_count) {
1785 /* This is the special case where we cancel LRU locks
1786 * asynchronously: the list of locks is passed in here, and
1787 * each lock is already marked LDLM_FL_CANCELING and
1788 * cancelled locally. */
1789 ldlm_cli_cancel_list(&blwi->blwi_head,
1790 blwi->blwi_count, NULL, 0, 0);
1792 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
1795 OBD_FREE(blwi, sizeof(*blwi));
1798 atomic_dec(&blp->blp_num_threads);
1799 complete(&blp->blp_comp);
1805 static int ldlm_setup(void);
1806 static int ldlm_cleanup(int force);
1808 int ldlm_get_ref(void)
1812 mutex_down(&ldlm_ref_sem);
1813 if (++ldlm_refcount == 1) {
1818 mutex_up(&ldlm_ref_sem);
1823 void ldlm_put_ref(int force)
1826 mutex_down(&ldlm_ref_sem);
1827 if (ldlm_refcount == 1) {
1828 int rc = ldlm_cleanup(force);
1830 CERROR("ldlm_cleanup failed: %d\n", rc);
1836 mutex_up(&ldlm_ref_sem);
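/* Bring up the LDLM: start the ldlm_cbd and ldlm_canceld ptlrpc services,
 * the blocking-callback thread pool, the expired-lock thread with its
 * waiting-locks timer, and the lock pools. */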
1841 static int ldlm_setup(void)
1843 struct ldlm_bl_pool *blp;
1850 if (ldlm_state != NULL)
1853 OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
1854 if (ldlm_state == NULL)
1858 rc = ldlm_proc_setup();
1863 ldlm_state->ldlm_cb_service =
1864 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1865 LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
1866 LDLM_CB_REPLY_PORTAL, ldlm_timeout * 900,
1867 ldlm_callback_handler, "ldlm_cbd",
1868 ldlm_svc_proc_dir, NULL,
1869 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1871 LCT_MD_THREAD|LCT_DT_THREAD);
1873 if (!ldlm_state->ldlm_cb_service) {
1874 CERROR("failed to start service\n");
1875 GOTO(out_proc, rc = -ENOMEM);
1878 ldlm_state->ldlm_cancel_service =
1879 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1880 LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
1881 LDLM_CANCEL_REPLY_PORTAL, ldlm_timeout * 6000,
1882 ldlm_cancel_handler, "ldlm_canceld",
1883 ldlm_svc_proc_dir, NULL,
1884 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1886 LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD);
1888 if (!ldlm_state->ldlm_cancel_service) {
1889 CERROR("failed to start service\n");
1890 GOTO(out_proc, rc = -ENOMEM);
1893 OBD_ALLOC(blp, sizeof(*blp));
1895 GOTO(out_proc, rc = -ENOMEM);
1896 ldlm_state->ldlm_bl_pool = blp;
1898 atomic_set(&blp->blp_num_threads, 0);
1899 cfs_waitq_init(&blp->blp_waitq);
1900 spin_lock_init(&blp->blp_lock);
1902 CFS_INIT_LIST_HEAD(&blp->blp_list);
1905 for (i = 0; i < LDLM_BL_THREADS; i++) {
1906 struct ldlm_bl_thread_data bltd = {
1910 init_completion(&blp->blp_comp);
1911 rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
1913 CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
1914 GOTO(out_thread, rc);
1916 wait_for_completion(&blp->blp_comp);
1919 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cancel_service);
1921 GOTO(out_thread, rc);
1923 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cb_service);
1925 GOTO(out_thread, rc);
1927 CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
1928 expired_lock_thread.elt_state = ELT_STOPPED;
1929 cfs_waitq_init(&expired_lock_thread.elt_waitq);
1931 CFS_INIT_LIST_HEAD(&waiting_locks_list);
1932 spin_lock_init(&waiting_locks_spinlock);
1933 cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
1935 rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
1937 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
1938 GOTO(out_thread, rc);
1941 wait_event(expired_lock_thread.elt_waitq,
1942 expired_lock_thread.elt_state == ELT_READY);
1946 rc = ldlm_pools_init();
1948 GOTO(out_thread, rc);
1954 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1955 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1960 ldlm_proc_cleanup();
1963 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
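/* Tear down the LDLM: refuse if any namespaces still exist, stop the
 * blocking-callback threads, unregister both services, and stop the
 * expired-lock thread. */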
1968 static int ldlm_cleanup(int force)
1971 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1975 if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
1976 !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
1977 CERROR("ldlm still has namespaces; clean these up first.\n");
1978 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
1979 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
1988 while (atomic_read(&blp->blp_num_threads) > 0) {
1989 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
1991 init_completion(&blp->blp_comp);
1993 spin_lock(&blp->blp_lock);
1994 list_add_tail(&blwi.blwi_entry, &blp->blp_list);
1995 cfs_waitq_signal(&blp->blp_waitq);
1996 spin_unlock(&blp->blp_lock);
1998 wait_for_completion(&blp->blp_comp);
2000 OBD_FREE(blp, sizeof(*blp));
2002 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2003 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2004 ldlm_proc_cleanup();
2006 expired_lock_thread.elt_state = ELT_TERMINATE;
2007 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
2008 wait_event(expired_lock_thread.elt_waitq,
2009 expired_lock_thread.elt_state == ELT_STOPPED);
2011 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2012 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2015 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
2021 int __init ldlm_init(void)
2023 init_mutex(&ldlm_ref_sem);
2024 init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
2025 init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
2026 ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
2027 sizeof(struct ldlm_resource), 0,
2028 SLAB_HWCACHE_ALIGN);
2029 if (ldlm_resource_slab == NULL)
2032 ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
2033 sizeof(struct ldlm_lock), 0,
2034 SLAB_HWCACHE_ALIGN);
2035 if (ldlm_lock_slab == NULL) {
2036 cfs_mem_cache_destroy(ldlm_resource_slab);
2043 void __exit ldlm_exit(void)
2047 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
2048 rc = cfs_mem_cache_destroy(ldlm_resource_slab);
2049 LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
2050 rc = cfs_mem_cache_destroy(ldlm_lock_slab);
2051 LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
2055 EXPORT_SYMBOL(ldlm_extent_shift_kms);
2058 EXPORT_SYMBOL(ldlm_get_processing_policy);
2059 EXPORT_SYMBOL(ldlm_lock2desc);
2060 EXPORT_SYMBOL(ldlm_register_intent);
2061 EXPORT_SYMBOL(ldlm_lockname);
2062 EXPORT_SYMBOL(ldlm_typename);
2063 EXPORT_SYMBOL(ldlm_lock2handle);
2064 EXPORT_SYMBOL(__ldlm_handle2lock);
2065 EXPORT_SYMBOL(ldlm_lock_get);
2066 EXPORT_SYMBOL(ldlm_lock_put);
2067 EXPORT_SYMBOL(ldlm_lock_match);
2068 EXPORT_SYMBOL(ldlm_lock_cancel);
2069 EXPORT_SYMBOL(ldlm_lock_addref);
2070 EXPORT_SYMBOL(ldlm_lock_decref);
2071 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
2072 EXPORT_SYMBOL(ldlm_lock_change_resource);
2073 EXPORT_SYMBOL(ldlm_lock_set_data);
2074 EXPORT_SYMBOL(ldlm_it2str);
2075 EXPORT_SYMBOL(ldlm_lock_dump);
2076 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2077 EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
2078 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
2079 EXPORT_SYMBOL(ldlm_lock_allow_match);
2081 /* ldlm_request.c */
2082 EXPORT_SYMBOL(ldlm_completion_ast);
2083 EXPORT_SYMBOL(ldlm_blocking_ast);
2084 EXPORT_SYMBOL(ldlm_glimpse_ast);
2085 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2086 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2087 EXPORT_SYMBOL(ldlm_cli_convert);
2088 EXPORT_SYMBOL(ldlm_cli_enqueue);
2089 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2090 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2091 EXPORT_SYMBOL(ldlm_cli_cancel);
2092 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2093 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2094 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2095 EXPORT_SYMBOL(ldlm_cli_join_lru);
2096 EXPORT_SYMBOL(ldlm_replay_locks);
2097 EXPORT_SYMBOL(ldlm_resource_foreach);
2098 EXPORT_SYMBOL(ldlm_namespace_foreach);
2099 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
2100 EXPORT_SYMBOL(ldlm_resource_iterate);
2101 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2102 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2105 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2106 EXPORT_SYMBOL(ldlm_server_completion_ast);
2107 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2108 EXPORT_SYMBOL(ldlm_handle_enqueue);
2109 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2110 EXPORT_SYMBOL(ldlm_handle_cancel);
2111 EXPORT_SYMBOL(ldlm_request_cancel);
2112 EXPORT_SYMBOL(ldlm_handle_convert);
2113 EXPORT_SYMBOL(ldlm_handle_convert0);
2114 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2115 EXPORT_SYMBOL(ldlm_get_ref);
2116 EXPORT_SYMBOL(ldlm_put_ref);
2117 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2118 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2120 /* ldlm_resource.c */
2121 EXPORT_SYMBOL(ldlm_namespace_new);
2122 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2123 EXPORT_SYMBOL(ldlm_namespace_free);
2124 EXPORT_SYMBOL(ldlm_namespace_dump);
2125 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2126 EXPORT_SYMBOL(ldlm_resource_get);
2127 EXPORT_SYMBOL(ldlm_resource_putref);
2128 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
2131 EXPORT_SYMBOL(client_import_add_conn);
2132 EXPORT_SYMBOL(client_import_del_conn);
2133 EXPORT_SYMBOL(client_obd_setup);
2134 EXPORT_SYMBOL(client_obd_cleanup);
2135 EXPORT_SYMBOL(client_connect_import);
2136 EXPORT_SYMBOL(client_disconnect_export);
2137 EXPORT_SYMBOL(target_start_recovery_thread);
2138 EXPORT_SYMBOL(target_stop_recovery_thread);
2139 EXPORT_SYMBOL(target_handle_connect);
2140 EXPORT_SYMBOL(target_cleanup_recovery);
2141 EXPORT_SYMBOL(target_destroy_export);
2142 EXPORT_SYMBOL(target_cancel_recovery_timer);
2143 EXPORT_SYMBOL(target_send_reply);
2144 EXPORT_SYMBOL(target_queue_recovery_request);
2145 EXPORT_SYMBOL(target_handle_ping);
2146 EXPORT_SYMBOL(target_pack_pool_reply);
2147 EXPORT_SYMBOL(target_handle_disconnect);
2150 EXPORT_SYMBOL(lock_res_and_lock);
2151 EXPORT_SYMBOL(unlock_res_and_lock);