1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002-2004 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of the Lustre file system, http://www.lustre.org
9 * Lustre is a trademark of Cluster File Systems, Inc.
11 * You may have signed or agreed to another license before downloading
12 * this software. If so, you are bound by the terms and conditions
13 * of that agreement, and the following does not apply to you. See the
14 * LICENSE file included with this distribution for more information.
16 * If you did not agree to a different license, then this copy of Lustre
17 * is open source software; you can redistribute it and/or modify it
18 * under the terms of version 2 of the GNU General Public License as
19 * published by the Free Software Foundation.
21 * In either case, Lustre is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * license text for more details.
28 # define EXPORT_SYMTAB
30 #define DEBUG_SUBSYSTEM S_LDLM
33 # include <libcfs/libcfs.h>
35 # include <liblustre.h>
38 #include <lustre_dlm.h>
39 #include <obd_class.h>
40 #include <libcfs/list.h>
41 #include "ldlm_internal.h"
44 static int ldlm_num_threads;
45 CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
46 "number of DLM service threads to start");
49 extern cfs_mem_cache_t *ldlm_resource_slab;
50 extern cfs_mem_cache_t *ldlm_lock_slab;
51 static struct semaphore ldlm_ref_sem;
52 static int ldlm_refcount;
56 static struct ldlm_state *ldlm_state;
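/* Round an absolute timeout up to the next whole second, so the waiting-locks
 * timer fires at second granularity rather than once per lock. */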
58 inline cfs_time_t round_timeout(cfs_time_t timeout)
60 return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
63 /* timeout for initial callback (AST) reply */
64 static inline unsigned int ldlm_get_rq_timeout(unsigned int ldlm_timeout,
65 unsigned int obd_timeout)
67 unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
69 return timeout < 1 ? 1 : timeout;
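/* For example, ldlm_timeout = 20 and obd_timeout = 100 would give
 * min(20, 100 / 3) = 20 seconds; anything below one second is clamped to 1. */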
73 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
74 static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
75 static struct list_head waiting_locks_list;
76 static cfs_timer_t waiting_locks_timer;
78 static struct expired_lock_thread {
79 cfs_waitq_t elt_waitq;
82 struct list_head elt_expired_locks;
83 } expired_lock_thread;
88 #define ELT_TERMINATE 2
94 * blp_prio_list is used for callbacks that should be handled
95 * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
98 struct list_head blp_prio_list;
101 * blp_list is used for all other callbacks which are likely
102 * to take longer to process.
104 struct list_head blp_list;
106 cfs_waitq_t blp_waitq;
107 struct completion blp_comp;
108 atomic_t blp_num_threads;
109 atomic_t blp_busy_threads;
114 struct ldlm_bl_work_item {
115 struct list_head blwi_entry;
116 struct ldlm_namespace *blwi_ns;
117 struct ldlm_lock_desc blwi_ld;
118 struct ldlm_lock *blwi_lock;
119 struct list_head blwi_head;
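/* Check, under waiting_locks_spinlock, whether any locks have been moved onto
 * the expired list and so need servicing by the expired-lock thread. */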
125 static inline int have_expired_locks(void)
130 spin_lock_bh(&waiting_locks_spinlock);
131 need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
132 spin_unlock_bh(&waiting_locks_spinlock);
137 static int expired_lock_main(void *arg)
139 struct list_head *expired = &expired_lock_thread.elt_expired_locks;
140 struct l_wait_info lwi = { 0 };
144 cfs_daemonize("ldlm_elt");
146 expired_lock_thread.elt_state = ELT_READY;
147 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
150 l_wait_event(expired_lock_thread.elt_waitq,
151 have_expired_locks() ||
152 expired_lock_thread.elt_state == ELT_TERMINATE,
155 spin_lock_bh(&waiting_locks_spinlock);
156 if (expired_lock_thread.elt_dump) {
157 spin_unlock_bh(&waiting_locks_spinlock);
159 /* from waiting_locks_callback, but not in timer */
160 libcfs_debug_dumplog();
161 libcfs_run_lbug_upcall(__FILE__,
162 "waiting_locks_callback",
163 expired_lock_thread.elt_dump);
165 spin_lock_bh(&waiting_locks_spinlock);
166 expired_lock_thread.elt_dump = 0;
171 while (!list_empty(expired)) {
172 struct obd_export *export;
173 struct ldlm_lock *lock;
175 lock = list_entry(expired->next, struct ldlm_lock,
177 if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
178 (void *)lock >= LP_POISON) {
179 spin_unlock_bh(&waiting_locks_spinlock);
180 CERROR("free lock on elt list %p\n", lock);
183 list_del_init(&lock->l_pending_chain);
184 if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
185 (void *)lock->l_export >= LP_POISON) {
186 CERROR("lock with free export on elt list %p\n",
188 lock->l_export = NULL;
189 LDLM_ERROR(lock, "free export");
192 export = class_export_get(lock->l_export);
193 spin_unlock_bh(&waiting_locks_spinlock);
196 class_fail_export(export);
197 class_export_put(export);
198 spin_lock_bh(&waiting_locks_spinlock);
200 spin_unlock_bh(&waiting_locks_spinlock);
202 if (do_dump && obd_dump_on_eviction) {
203 CERROR("dump the log upon eviction\n");
204 libcfs_debug_dumplog();
207 if (expired_lock_thread.elt_state == ELT_TERMINATE)
211 expired_lock_thread.elt_state = ELT_STOPPED;
212 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
216 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
218 /* This is called from within a timer interrupt and cannot schedule */
219 static void waiting_locks_callback(unsigned long unused)
221 struct ldlm_lock *lock, *last = NULL;
224 spin_lock_bh(&waiting_locks_spinlock);
225 while (!list_empty(&waiting_locks_list)) {
226 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
229 if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
230 (lock->l_req_mode == LCK_GROUP))
233 if (ptlrpc_check_suspend()) {
234 /* there is a case where we talk to one MDS while holding
235 * a lock from another MDS; if that second MDS is being
236 * recovered we can easily end up here, so we suspend the
237 * timeouts. bug 6019 */

239 LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
240 lock->l_export->exp_client_uuid.uuid,
241 lock->l_export->exp_connection->c_remote_uuid.uuid,
242 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
244 list_del_init(&lock->l_pending_chain);
245 spin_unlock_bh(&waiting_locks_spinlock);
246 ldlm_add_waiting_lock(lock);
250 /* if timeout overlaps the activation time of suspended timeouts
251 * then extend it to give the client a chance to reconnect */
252 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
253 cfs_time_seconds(obd_timeout)/2),
254 ptlrpc_suspend_wakeup_time())) {
255 LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
256 lock->l_export->exp_client_uuid.uuid,
257 lock->l_export->exp_connection->c_remote_uuid.uuid,
258 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
260 list_del_init(&lock->l_pending_chain);
261 spin_unlock_bh(&waiting_locks_spinlock);
262 ldlm_add_waiting_lock(lock);
266 LDLM_ERROR(lock, "lock callback timer expired: evicting client "
268 lock->l_export->exp_client_uuid.uuid,
269 lock->l_export->exp_connection->c_remote_uuid.uuid,
270 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
274 list_del(&lock->l_pending_chain);
275 list_add(&lock->l_pending_chain,
276 &expired_lock_thread.elt_expired_locks);
279 if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
280 if (obd_dump_on_timeout)
281 expired_lock_thread.elt_dump = __LINE__;
283 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
287 * Make sure the timer will fire again if we have any locks
290 if (!list_empty(&waiting_locks_list)) {
291 cfs_time_t timeout_rounded;
292 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
294 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
295 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
297 spin_unlock_bh(&waiting_locks_spinlock);
301 * Indicate that we're waiting for a client to call us back cancelling a given
302 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
303 * timer to fire appropriately. (We round up to the next second, to avoid
304 * floods of timer firings during periods of high lock contention and traffic).
306 * Called with the namespace lock held.
308 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock)
310 cfs_time_t timeout_rounded;
312 if (!list_empty(&lock->l_pending_chain))
315 lock->l_callback_timeout = cfs_time_add(cfs_time_current(),
316 cfs_time_seconds(obd_timeout)/2);
318 timeout_rounded = round_timeout(lock->l_callback_timeout);
320 if (cfs_time_before(timeout_rounded, cfs_timer_deadline(&waiting_locks_timer)) ||
321 !cfs_timer_is_armed(&waiting_locks_timer)) {
322 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
325 list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
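/* Locking wrapper around __ldlm_add_waiting_lock(): refuses destroyed locks
 * (bug 5653) and must never be called for LDLM_FL_CANCEL_ON_BLOCK locks. */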
329 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
333 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
335 spin_lock_bh(&waiting_locks_spinlock);
336 if (lock->l_destroyed) {
337 static cfs_time_t next;
338 spin_unlock_bh(&waiting_locks_spinlock);
339 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
340 if (cfs_time_after(cfs_time_current(), next)) {
341 next = cfs_time_shift(14400);
342 libcfs_debug_dumpstack(NULL);
347 ret = __ldlm_add_waiting_lock(lock);
348 spin_unlock_bh(&waiting_locks_spinlock);
350 LDLM_DEBUG(lock, "%sadding to wait list",
351 ret == 0 ? "not re-" : "");
356 * Remove a lock from the pending list, likely because it had its cancellation
357 * callback arrive without incident. This adjusts the lock-timeout timer if
358 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
360 * Called with namespace lock held.
362 int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
364 struct list_head *list_next;
366 if (list_empty(&lock->l_pending_chain))
369 list_next = lock->l_pending_chain.next;
370 if (lock->l_pending_chain.prev == &waiting_locks_list) {
371 /* Removing the head of the list, adjust timer. */
372 if (list_next == &waiting_locks_list) {
373 /* No more, just cancel. */
374 cfs_timer_disarm(&waiting_locks_timer);
376 struct ldlm_lock *next;
377 next = list_entry(list_next, struct ldlm_lock,
379 cfs_timer_arm(&waiting_locks_timer,
380 round_timeout(next->l_callback_timeout));
383 list_del_init(&lock->l_pending_chain);
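/* Locking wrapper around __ldlm_del_waiting_lock(); a no-op for client-side
 * locks, which have no waiting list. */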
388 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
392 if (lock->l_export == NULL) {
393 /* We don't have a "waiting locks list" on clients. */
394 LDLM_DEBUG(lock, "client lock: no-op");
398 spin_lock_bh(&waiting_locks_spinlock);
399 ret = __ldlm_del_waiting_lock(lock);
400 spin_unlock_bh(&waiting_locks_spinlock);
402 LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
409 * Called with namespace lock held.
411 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
413 if (lock->l_export == NULL) {
414 /* We don't have a "waiting locks list" on clients. */
415 LDLM_DEBUG(lock, "client lock: no-op");
419 spin_lock_bh(&waiting_locks_spinlock);
421 if (list_empty(&lock->l_pending_chain)) {
422 spin_unlock_bh(&waiting_locks_spinlock);
423 LDLM_DEBUG(lock, "wasn't waiting");
427 __ldlm_del_waiting_lock(lock);
428 __ldlm_add_waiting_lock(lock);
429 spin_unlock_bh(&waiting_locks_spinlock);
431 LDLM_DEBUG(lock, "refreshed");
435 #else /* !__KERNEL__ */
437 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
439 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
443 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
448 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
452 #endif /* __KERNEL__ */
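/* An AST to this client timed out or failed outright: complain on the console
 * and fail the export so its remaining locks get cleaned up. */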
454 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
455 const char *ast_type)
457 struct ptlrpc_connection *conn = lock->l_export->exp_connection;
458 char *str = libcfs_nid2str(conn->c_peer.nid);
460 LCONSOLE_ERROR_MSG(0x138, "A client on nid %s was evicted from "
461 "service %s.\n", str,
462 lock->l_export->exp_obd->obd_name);
464 LCONSOLE_ERROR_MSG(0x012, "Lock %s callback to %s timed out for "
465 "resource %d\n", ast_type,
466 obd_export_nid2str(lock->l_export), rc);
468 if (obd_dump_on_timeout)
469 libcfs_debug_dumplog();
470 class_fail_export(lock->l_export);
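/* Handle an error returned by a blocking/completion/glimpse AST. On timeouts
 * the client is evicted unless it is a liblustre client or its cancel already
 * arrived; other errors just cancel the lock and ask for a reprocess restart. */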
473 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
474 struct ptlrpc_request *req, int rc,
475 const char *ast_type)
477 lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
479 if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
480 LASSERT(lock->l_export);
481 if (lock->l_export->exp_libclient) {
482 LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
483 " timeout, just cancelling lock", ast_type,
484 libcfs_nid2str(peer.nid));
485 ldlm_lock_cancel(lock);
487 } else if (lock->l_flags & LDLM_FL_CANCEL) {
488 LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
489 "cancel was received (AST reply lost?)",
490 ast_type, libcfs_nid2str(peer.nid));
491 ldlm_lock_cancel(lock);
494 ldlm_del_waiting_lock(lock);
495 ldlm_failed_ast(lock, rc, ast_type);
499 LDLM_DEBUG(lock, "client (nid %s) returned %d"
500 " from %s AST - normal race",
501 libcfs_nid2str(peer.nid),
503 lustre_msg_get_status(req->rq_repmsg) : -1,
506 LDLM_ERROR(lock, "client (nid %s) returned %d "
507 "from %s AST", libcfs_nid2str(peer.nid),
508 (req->rq_repmsg != NULL) ?
509 lustre_msg_get_status(req->rq_repmsg) : 0,
511 ldlm_lock_cancel(lock);
512 /* Server-side AST functions are called from ldlm_reprocess_all,
513 * which needs to be told to please restart its reprocessing. */
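/* ptlrpc interpret callback for asynchronous blocking/completion ASTs: refresh
 * the LVB if the client already cancelled the lock, route errors through
 * ldlm_handle_ast_error(), and mark the set for a reprocess restart if needed. */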
520 static int ldlm_cb_interpret(struct ptlrpc_request *req, void *data, int rc)
522 struct ldlm_cb_set_arg *arg;
523 struct ldlm_lock *lock;
526 LASSERT(data != NULL);
528 arg = req->rq_async_args.pointer_arg[0];
529 lock = req->rq_async_args.pointer_arg[1];
530 LASSERT(lock != NULL);
532 /* If the client cancelled the lock but the cancel has not
533 * been received yet, we need to update lvbo to have the
534 * proper attributes cached. */
535 if (rc == -EINVAL && arg->type == LDLM_BL_CALLBACK)
536 ldlm_res_lvbo_update(lock->l_resource, NULL,
538 rc = ldlm_handle_ast_error(lock, req, rc,
539 arg->type == LDLM_BL_CALLBACK
540 ? "blocking" : "completion");
546 atomic_set(&arg->restart, 1);
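/* Common tail for blocking and completion ASTs: instant-cancel requests are
 * sent immediately and the set is marked for restart; everything else is just
 * queued on the shared ptlrpc set. */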
551 static inline int ldlm_bl_and_cp_ast_fini(struct ptlrpc_request *req,
552 struct ldlm_cb_set_arg *arg,
553 struct ldlm_lock *lock,
559 if (unlikely(instant_cancel)) {
560 rc = ptl_send_rpc(req, 1);
561 ptlrpc_req_finished(req);
563 /* If we cancelled the lock, we need to restart
564 * ldlm_reprocess_queue */
565 atomic_set(&arg->restart, 1);
568 ptlrpc_set_add_req(arg->set, req);
575 * ->l_blocking_ast() method for server-side locks. This is invoked when a
576 * newly enqueued server lock conflicts with the given one.
578 * Sends a blocking AST RPC to the client owning that lock and arms a timeout
579 * timer to wait for the client's response.
581 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
582 struct ldlm_lock_desc *desc,
583 void *data, int flag)
585 struct ldlm_cb_set_arg *arg = data;
586 struct ldlm_request *body;
587 struct ptlrpc_request *req;
588 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
589 [DLM_LOCKREQ_OFF] = sizeof(*body) };
590 int instant_cancel = 0, rc;
593 if (flag == LDLM_CB_CANCELING) {
594 /* Don't need to do anything here. */
599 LASSERT(data != NULL);
600 if (lock->l_export->exp_obd->obd_recovering != 0) {
601 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
602 ldlm_lock_dump(D_ERROR, lock, 0);
605 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
606 LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK, 2, size,
611 req->rq_async_args.pointer_arg[0] = arg;
612 req->rq_async_args.pointer_arg[1] = lock;
613 req->rq_interpret_reply = ldlm_cb_interpret;
614 req->rq_no_resend = 1;
616 lock_res(lock->l_resource);
617 if (lock->l_granted_mode != lock->l_req_mode) {
618 /* this blocking AST will be communicated as part of the
619 * completion AST instead */
620 unlock_res(lock->l_resource);
621 ptlrpc_req_finished(req);
622 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
626 if (lock->l_destroyed) {
627 /* What's the point? */
628 unlock_res(lock->l_resource);
629 ptlrpc_req_finished(req);
633 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
636 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
637 body->lock_handle[0] = lock->l_remote_handle;
638 body->lock_desc = *desc;
639 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
641 LDLM_DEBUG(lock, "server preparing blocking AST");
643 ptlrpc_req_set_repsize(req, 1, NULL);
644 if (instant_cancel) {
645 unlock_res(lock->l_resource);
646 ldlm_lock_cancel(lock);
648 LASSERT(lock->l_granted_mode == lock->l_req_mode);
649 ldlm_add_waiting_lock(lock);
650 unlock_res(lock->l_resource);
653 req->rq_send_state = LUSTRE_IMP_FULL;
654 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
656 if (lock->l_export && lock->l_export->exp_ldlm_stats)
657 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
658 LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
660 rc = ldlm_bl_and_cp_ast_fini(req, arg, lock, instant_cancel);
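/* ->l_completion_ast() for server-side locks: tell the client its lock was
 * granted, shipping the LVB and any pending blocking-AST flag along with the
 * completion, and start the lock-callback timer. */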
665 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
667 struct ldlm_cb_set_arg *arg = data;
668 struct ldlm_request *body;
669 struct ptlrpc_request *req;
670 struct timeval granted_time;
671 long total_enqueue_wait;
672 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
673 [DLM_LOCKREQ_OFF] = sizeof(*body) };
674 int rc, buffers = 2, instant_cancel = 0;
677 LASSERT(lock != NULL);
678 LASSERT(data != NULL);
680 do_gettimeofday(&granted_time);
681 total_enqueue_wait = cfs_timeval_sub(&granted_time,
682 &lock->l_enqueued_time, NULL);
684 if (total_enqueue_wait / 1000000 > obd_timeout)
685 LDLM_ERROR(lock, "enqueue wait took %luus from %lu",
686 total_enqueue_wait, lock->l_enqueued_time.tv_sec);
688 lock_res_and_lock(lock);
689 if (lock->l_resource->lr_lvb_len) {
690 size[DLM_REQ_REC_OFF] = lock->l_resource->lr_lvb_len;
693 unlock_res_and_lock(lock);
695 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
696 LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK, buffers,
701 req->rq_async_args.pointer_arg[0] = arg;
702 req->rq_async_args.pointer_arg[1] = lock;
703 req->rq_interpret_reply = ldlm_cb_interpret;
704 req->rq_no_resend = 1;
706 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
707 body->lock_handle[0] = lock->l_remote_handle;
708 body->lock_flags = flags;
709 ldlm_lock2desc(lock, &body->lock_desc);
714 lvb = lustre_msg_buf(req->rq_reqmsg, DLM_REQ_REC_OFF,
715 lock->l_resource->lr_lvb_len);
716 lock_res_and_lock(lock);
717 memcpy(lvb, lock->l_resource->lr_lvb_data,
718 lock->l_resource->lr_lvb_len);
719 unlock_res_and_lock(lock);
722 LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
725 ptlrpc_req_set_repsize(req, 1, NULL);
727 req->rq_send_state = LUSTRE_IMP_FULL;
728 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
730 /* We only send real blocking ASTs after the lock is granted */
731 lock_res_and_lock(lock);
732 if (lock->l_flags & LDLM_FL_AST_SENT) {
733 body->lock_flags |= LDLM_FL_AST_SENT;
735 /* We might get here before ldlm_handle_enqueue has set the
736 * LDLM_FL_CANCEL_ON_BLOCK flag. In that case we would put this
737 * lock on the waiting list, but this is safe: the matching code
738 * in ldlm_handle_enqueue will still call ldlm_lock_cancel(),
739 * which not only cancels the lock but also removes it from the
740 * waiting list. */
741 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
742 unlock_res_and_lock(lock);
743 ldlm_lock_cancel(lock);
745 lock_res_and_lock(lock);
747 /* start the lock-timeout clock */
748 ldlm_add_waiting_lock(lock);
751 unlock_res_and_lock(lock);
753 if (lock->l_export && lock->l_export->exp_ldlm_stats)
754 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
755 LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
757 rc = ldlm_bl_and_cp_ast_fini(req, arg, lock, instant_cancel);
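/* ->l_glimpse_ast() for server-side locks: synchronously ask the lock holder
 * for an up-to-date LVB and merge the reply into the resource via
 * ldlm_res_lvbo_update(). */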
762 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
764 struct ldlm_resource *res = lock->l_resource;
765 struct ldlm_request *body;
766 struct ptlrpc_request *req;
767 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
768 [DLM_LOCKREQ_OFF] = sizeof(*body) };
772 LASSERT(lock != NULL);
774 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
775 LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK, 2, size,
780 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
781 body->lock_handle[0] = lock->l_remote_handle;
782 ldlm_lock2desc(lock, &body->lock_desc);
784 lock_res_and_lock(lock);
785 size[REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
786 unlock_res_and_lock(lock);
787 res = lock->l_resource;
788 ptlrpc_req_set_repsize(req, 2, size);
790 req->rq_send_state = LUSTRE_IMP_FULL;
791 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
793 if (lock->l_export && lock->l_export->exp_ldlm_stats)
794 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
795 LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
797 rc = ptlrpc_queue_wait(req);
798 if (rc == -ELDLM_NO_LOCK_DATA)
799 LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
801 rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
803 rc = ldlm_res_lvbo_update(res, req->rq_repmsg,
805 ptlrpc_req_finished(req);
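/* Search the export's list of held locks for one matching the client's remote
 * handle; used to find the original lock when an enqueue is replayed. */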
809 static struct ldlm_lock *
810 find_existing_lock(struct obd_export *exp,
811 const struct lustre_handle *remote_hdl)
813 struct list_head *iter;
815 spin_lock(&exp->exp_ldlm_data.led_lock);
816 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
817 struct ldlm_lock *lock;
818 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
819 if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
821 spin_unlock(&exp->exp_ldlm_data.led_lock);
825 spin_unlock(&exp->exp_ldlm_data.led_lock);
830 extern unsigned long long lu_time_stamp_get(void);
832 #define lu_time_stamp_get() time(NULL)
836 * Main server-side entry point into LDLM. This is called by ptlrpc service
837 * threads to carry out client lock enqueueing requests.
839 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
840 struct ptlrpc_request *req,
841 const struct ldlm_request *dlm_req,
842 const struct ldlm_callback_suite *cbs)
844 struct ldlm_reply *dlm_rep;
845 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
846 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
849 ldlm_error_t err = ELDLM_OK;
850 struct ldlm_lock *lock = NULL;
854 LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
856 ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
857 flags = dlm_req->lock_flags;
859 LASSERT(req->rq_export);
861 if (req->rq_export->exp_ldlm_stats)
862 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
863 LDLM_ENQUEUE - LDLM_FIRST_OPC);
865 if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
866 dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
867 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
868 dlm_req->lock_desc.l_resource.lr_type);
869 GOTO(out, rc = -EFAULT);
872 if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
873 dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
874 dlm_req->lock_desc.l_req_mode &
875 (dlm_req->lock_desc.l_req_mode-1))) {
876 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
877 dlm_req->lock_desc.l_req_mode);
878 GOTO(out, rc = -EFAULT);
881 if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
882 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
884 DEBUG_REQ(D_ERROR, req,
885 "PLAIN lock request from IBITS client?");
886 GOTO(out, rc = -EPROTO);
888 } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
890 DEBUG_REQ(D_ERROR, req,
891 "IBITS lock request from unaware client?");
892 GOTO(out, rc = -EPROTO);
896 /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
897 against server's _CONNECT_SUPPORTED flags? (I don't want to use
898 ibits for mgc/mgs) */
900 /* INODEBITS_INTEROP: Perform conversion from a plain lock to an
901 * inodebits lock if the client does not support inodebits. */
902 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
903 (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
904 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
905 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
906 MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
907 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
908 dlm_req->lock_desc.l_req_mode = LCK_CR;
912 if (unlikely(flags & LDLM_FL_REPLAY)) {
913 lock = find_existing_lock(req->rq_export,
914 &dlm_req->lock_handle[0]);
916 DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
917 LPX64, lock->l_handle.h_cookie);
918 GOTO(existing_lock, rc = 0);
922 /* The lock's callback data might be set in the policy function */
923 lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
924 dlm_req->lock_desc.l_resource.lr_type,
925 dlm_req->lock_desc.l_req_mode,
926 cbs->lcs_blocking, cbs->lcs_completion,
927 cbs->lcs_glimpse, NULL, 0);
930 GOTO(out, rc = -ENOMEM);
932 do_gettimeofday(&lock->l_enqueued_time);
933 lock->l_remote_handle = dlm_req->lock_handle[0];
934 LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
936 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
937 /* Don't enqueue a lock onto the export if it has already
938 * been evicted. Cancel it now instead. (bug 3822) */
939 if (req->rq_export->exp_failed) {
940 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
941 GOTO(out, rc = -ENOTCONN);
943 lock->l_export = class_export_get(req->rq_export);
944 spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
945 list_add(&lock->l_export_chain,
946 &lock->l_export->exp_ldlm_data.led_held_locks);
947 spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
951 if (flags & LDLM_FL_HAS_INTENT) {
952 /* In this case, the reply buffer is allocated deep in
953 * local_lock_enqueue by the policy function. */
958 lock_res_and_lock(lock);
959 if (lock->l_resource->lr_lvb_len) {
960 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
963 unlock_res_and_lock(lock);
965 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
966 GOTO(out, rc = -ENOMEM);
968 rc = lustre_pack_reply(req, buffers, size, NULL);
973 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
974 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
975 if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
976 lock->l_req_extent = lock->l_policy_data.l_extent;
978 err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
982 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
984 dlm_rep->lock_flags = flags;
986 ldlm_lock2desc(lock, &dlm_rep->lock_desc);
987 ldlm_lock2handle(lock, &dlm_rep->lock_handle);
989 /* We never send a blocking AST until the lock is granted, but
990 * we can tell the client about it right now */
991 lock_res_and_lock(lock);
993 /* Now take into account the flags to be inherited from the original lock
994 * request, both in the reply to the client and in our own lock flags. */
995 dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
996 lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
998 /* Don't move a pending lock onto the export if it has already
999 * been evicted. Cancel it now instead. (bug 5683) */
1000 if (unlikely(req->rq_export->exp_failed ||
1001 OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
1002 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
1004 } else if (lock->l_flags & LDLM_FL_AST_SENT) {
1005 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
1006 if (lock->l_granted_mode == lock->l_req_mode) {
1008 * Only cancel the lock if it was granted, because otherwise it
1009 * would be destroyed immediately and never be granted
1010 * in the future, causing timeouts on the client. A lock
1011 * that was not granted is cancelled immediately after
1012 * sending the completion AST.
1014 if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
1015 unlock_res_and_lock(lock);
1016 ldlm_lock_cancel(lock);
1017 lock_res_and_lock(lock);
1019 ldlm_add_waiting_lock(lock);
1022 /* Make sure we never ever grant usual metadata locks to liblustre
1024 if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
1025 dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
1026 req->rq_export->exp_libclient) {
1027 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
1028 !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
1029 CERROR("Granting sync lock to libclient. "
1030 "req fl %d, rep fl %d, lock fl %d\n",
1031 dlm_req->lock_flags, dlm_rep->lock_flags,
1033 LDLM_ERROR(lock, "sync lock");
1034 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
1035 struct ldlm_intent *it;
1036 it = lustre_msg_buf(req->rq_reqmsg,
1040 CERROR("This is intent %s ("LPU64")\n",
1041 ldlm_it2str(it->opc), it->opc);
1047 unlock_res_and_lock(lock);
1051 req->rq_status = err;
1052 if (req->rq_reply_state == NULL) {
1053 err = lustre_pack_reply(req, 1, NULL, NULL);
1056 req->rq_status = rc;
1059 /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1060 * ldlm_reprocess_all. If this moves, revisit that code. -phil */
1062 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
1063 "(err=%d, rc=%d)", err, rc);
1065 lock_res_and_lock(lock);
1067 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
1068 if (size[DLM_REPLY_REC_OFF] > 0) {
1069 void *lvb = lustre_msg_buf(req->rq_repmsg,
1071 size[DLM_REPLY_REC_OFF]);
1072 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1075 memcpy(lvb, lock->l_resource->lr_lvb_data,
1076 size[DLM_REPLY_REC_OFF]);
1079 ldlm_resource_unlink_lock(lock);
1080 ldlm_lock_destroy_nolock(lock);
1082 unlock_res_and_lock(lock);
1084 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1085 ldlm_reprocess_all(lock->l_resource);
1087 LDLM_LOCK_PUT(lock);
1090 LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1096 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1097 ldlm_completion_callback completion_callback,
1098 ldlm_blocking_callback blocking_callback,
1099 ldlm_glimpse_callback glimpse_callback)
1102 struct ldlm_request *dlm_req;
1103 struct ldlm_callback_suite cbs = {
1104 .lcs_completion = completion_callback,
1105 .lcs_blocking = blocking_callback,
1106 .lcs_glimpse = glimpse_callback
1110 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1111 sizeof *dlm_req, lustre_swab_ldlm_request);
1112 if (dlm_req != NULL) {
1113 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1114 req, dlm_req, &cbs);
1116 CERROR ("Can't unpack dlm_req\n");
1122 int ldlm_handle_convert0(struct ptlrpc_request *req,
1123 const struct ldlm_request *dlm_req)
1125 struct ldlm_reply *dlm_rep;
1126 struct ldlm_lock *lock;
1128 int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1129 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
1132 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1133 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1134 LDLM_CONVERT - LDLM_FIRST_OPC);
1136 rc = lustre_pack_reply(req, 2, size, NULL);
1140 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
1142 dlm_rep->lock_flags = dlm_req->lock_flags;
1144 lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1146 req->rq_status = EINVAL;
1150 LDLM_DEBUG(lock, "server-side convert handler START");
1152 do_gettimeofday(&lock->l_enqueued_time);
1153 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1154 &dlm_rep->lock_flags);
1156 if (ldlm_del_waiting_lock(lock))
1157 LDLM_DEBUG(lock, "converted waiting lock");
1160 req->rq_status = EDEADLOCK;
1165 if (!req->rq_status)
1166 ldlm_reprocess_all(lock->l_resource);
1167 LDLM_DEBUG(lock, "server-side convert handler END");
1168 LDLM_LOCK_PUT(lock);
1170 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1175 int ldlm_handle_convert(struct ptlrpc_request *req)
1178 struct ldlm_request *dlm_req;
1180 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof *dlm_req,
1181 lustre_swab_ldlm_request);
1182 if (dlm_req != NULL) {
1183 rc = ldlm_handle_convert0(req, dlm_req);
1185 CERROR ("Can't unpack dlm_req\n");
1191 /* Cancel all the locks, which handles are packed into ldlm_request */
1192 int ldlm_request_cancel(struct ptlrpc_request *req,
1193 const struct ldlm_request *dlm_req, int first)
1195 struct ldlm_resource *res, *pres = NULL;
1196 struct ldlm_lock *lock;
1197 int i, count, done = 0;
1200 count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1204 /* There is no lock on the server at replay time, so skip
1205 * lock cancelling to make the replay tests pass. */
1206 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1209 LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1210 "starting at %d", count, first);
1212 for (i = first; i < count; i++) {
1213 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1215 LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1216 "lock (cookie "LPU64")",
1217 dlm_req->lock_handle[i].cookie);
1221 res = lock->l_resource;
1226 ldlm_reprocess_all(pres);
1227 ldlm_resource_putref(pres);
1230 ldlm_resource_getref(res);
1231 ldlm_res_lvbo_update(res, NULL, 0, 1);
1235 ldlm_lock_cancel(lock);
1236 LDLM_LOCK_PUT(lock);
1239 ldlm_reprocess_all(pres);
1240 ldlm_resource_putref(pres);
1242 LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1246 int ldlm_handle_cancel(struct ptlrpc_request *req)
1248 struct ldlm_request *dlm_req;
1252 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1253 lustre_swab_ldlm_request);
1254 if (dlm_req == NULL) {
1255 CERROR("bad request buffer for cancel\n");
1259 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1260 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1261 LDLM_CANCEL - LDLM_FIRST_OPC);
1263 rc = lustre_pack_reply(req, 1, NULL, NULL);
1267 if (!ldlm_request_cancel(req, dlm_req, 0))
1268 req->rq_status = ESTALE;
1270 if (ptlrpc_reply(req) != 0)
1276 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1277 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1282 LDLM_DEBUG(lock, "client blocking AST callback handler START");
1284 lock_res_and_lock(lock);
1285 lock->l_flags |= LDLM_FL_CBPENDING;
1287 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1288 lock->l_flags |= LDLM_FL_CANCEL;
1290 do_ast = (!lock->l_readers && !lock->l_writers);
1291 unlock_res_and_lock(lock);
1294 LDLM_DEBUG(lock, "already unused, calling "
1295 "callback (%p)", lock->l_blocking_ast);
1296 if (lock->l_blocking_ast != NULL)
1297 lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1300 LDLM_DEBUG(lock, "Lock still has references, will be"
1301 " cancelled later");
1304 LDLM_DEBUG(lock, "client blocking callback handler END");
1305 LDLM_LOCK_PUT(lock);
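/* Client-side handler for a completion AST: apply any mode, policy or resource
 * changes the server sent, copy in the LVB, grant the lock locally and run the
 * resulting completion-AST work. */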
1309 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1310 struct ldlm_namespace *ns,
1311 struct ldlm_request *dlm_req,
1312 struct ldlm_lock *lock)
1314 CFS_LIST_HEAD(ast_list);
1317 LDLM_DEBUG(lock, "client completion callback handler START");
1319 lock_res_and_lock(lock);
1321 /* If we receive the completion AST before the actual enqueue returned,
1322 * then we might need to switch lock modes, resources, or extents. */
1323 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1324 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1325 LDLM_DEBUG(lock, "completion AST, new lock mode");
1328 if (lock->l_resource->lr_type != LDLM_PLAIN) {
1329 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
1330 LDLM_DEBUG(lock, "completion AST, new policy data");
1333 ldlm_resource_unlink_lock(lock);
1334 if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1335 &lock->l_resource->lr_name,
1336 sizeof(lock->l_resource->lr_name)) != 0) {
1337 unlock_res_and_lock(lock);
1338 ldlm_lock_change_resource(ns, lock,
1339 &dlm_req->lock_desc.l_resource.lr_name);
1340 LDLM_DEBUG(lock, "completion AST, new resource");
1341 CERROR("change resource!\n");
1342 lock_res_and_lock(lock);
1345 if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1346 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
1347 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1350 if (lock->l_lvb_len) {
1352 lvb = lustre_swab_reqbuf(req, DLM_REQ_REC_OFF, lock->l_lvb_len,
1353 lock->l_lvb_swabber);
1355 LDLM_ERROR(lock, "completion AST did not contain "
1358 memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1362 ldlm_grant_lock(lock, &ast_list);
1363 unlock_res_and_lock(lock);
1365 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1367 ldlm_run_cp_ast_work(&ast_list);
1369 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1371 LDLM_LOCK_PUT(lock);
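/* Client-side handler for a glimpse AST: let ->l_glimpse_ast() fill in the
 * reply, then opportunistically cancel the lock if it is an unused PW lock
 * that has been idle for a while. */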
1375 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1376 struct ldlm_namespace *ns,
1377 struct ldlm_request *dlm_req,
1378 struct ldlm_lock *lock)
1383 LDLM_DEBUG(lock, "client glimpse AST callback handler");
1385 if (lock->l_glimpse_ast != NULL)
1386 rc = lock->l_glimpse_ast(lock, req);
1388 if (req->rq_repmsg != NULL) {
1391 req->rq_status = rc;
1395 lock_res_and_lock(lock);
1396 if (lock->l_granted_mode == LCK_PW &&
1397 !lock->l_readers && !lock->l_writers &&
1398 cfs_time_after(cfs_time_current(),
1399 cfs_time_add(lock->l_last_used,
1400 cfs_time_seconds(10)))) {
1401 unlock_res_and_lock(lock);
1402 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1403 ldlm_handle_bl_callback(ns, NULL, lock);
1408 unlock_res_and_lock(lock);
1409 LDLM_LOCK_PUT(lock);
1413 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1415 req->rq_status = rc;
1416 if (req->rq_reply_state == NULL) {
1417 rc = lustre_pack_reply(req, 1, NULL, NULL);
1421 return ptlrpc_reply(req);
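/* Hand a blocking callback (or a list of already-cancelled locks) off to the
 * ldlm_bl_* thread pool; LDLM_FL_DISCARD_DATA requests go on the priority
 * list. */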
1425 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1426 struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1427 struct list_head *cancels, int count)
1429 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1430 struct ldlm_bl_work_item *blwi;
1433 if (cancels && count == 0)
1436 OBD_ALLOC(blwi, sizeof(*blwi));
1442 blwi->blwi_ld = *ld;
1444 list_add(&blwi->blwi_head, cancels);
1445 list_del_init(cancels);
1446 blwi->blwi_count = count;
1448 blwi->blwi_lock = lock;
1450 spin_lock(&blp->blp_lock);
1451 if (lock && lock->l_flags & LDLM_FL_DISCARD_DATA) {
1452 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
1453 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
1455 /* other blocking callbacks are added to the regular list */
1456 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1458 cfs_waitq_signal(&blp->blp_waitq);
1459 spin_unlock(&blp->blp_lock);
1465 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1466 struct ldlm_lock *lock)
1469 RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0));
1475 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1476 struct list_head *cancels, int count)
1479 RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
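/* Request handler for the ldlm_cbd service: dispatches blocking, completion
 * and glimpse ASTs arriving from servers, plus the llog and quota callbacks
 * that share the same portal. */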
1485 static int ldlm_callback_handler(struct ptlrpc_request *req)
1487 struct ldlm_namespace *ns;
1488 struct ldlm_request *dlm_req;
1489 struct ldlm_lock *lock;
1493 /* Requests arrive in sender's byte order. The ptlrpc service
1494 * handler has already checked and, if necessary, byte-swapped the
1495 * incoming request message body, but I am responsible for the
1496 * message buffers. */
1498 if (req->rq_export == NULL) {
1499 struct ldlm_request *dlm_req;
1501 CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
1502 "export cookie "LPX64"; this is "
1503 "normal if this node rebooted with a lock held\n",
1504 lustre_msg_get_opc(req->rq_reqmsg),
1505 libcfs_id2str(req->rq_peer),
1506 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1508 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1510 lustre_swab_ldlm_request);
1511 if (dlm_req != NULL)
1512 CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
1513 dlm_req->lock_handle[0].cookie);
1515 ldlm_callback_reply(req, -ENOTCONN);
1519 LASSERT(req->rq_export != NULL);
1520 LASSERT(req->rq_export->exp_obd != NULL);
1522 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1523 case LDLM_BL_CALLBACK:
1524 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
1527 case LDLM_CP_CALLBACK:
1528 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
1531 case LDLM_GL_CALLBACK:
1532 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
1535 case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1536 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
1538 rc = llog_origin_handle_cancel(req);
1539 ldlm_callback_reply(req, rc);
1541 case OBD_QC_CALLBACK:
1542 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
1544 rc = target_handle_qc_callback(req);
1545 ldlm_callback_reply(req, rc);
1549 /* reply in handler */
1550 rc = target_handle_dqacq_callback(req);
1552 case LLOG_ORIGIN_HANDLE_CREATE:
1553 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
1555 rc = llog_origin_handle_create(req);
1556 ldlm_callback_reply(req, rc);
1558 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1559 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
1561 rc = llog_origin_handle_next_block(req);
1562 ldlm_callback_reply(req, rc);
1564 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1565 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
1567 rc = llog_origin_handle_read_header(req);
1568 ldlm_callback_reply(req, rc);
1570 case LLOG_ORIGIN_HANDLE_CLOSE:
1571 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
1573 rc = llog_origin_handle_close(req);
1574 ldlm_callback_reply(req, rc);
1580 CERROR("unknown opcode %u\n",
1581 lustre_msg_get_opc(req->rq_reqmsg));
1582 ldlm_callback_reply(req, -EPROTO);
1586 ns = req->rq_export->exp_obd->obd_namespace;
1587 LASSERT(ns != NULL);
1589 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1590 lustre_swab_ldlm_request);
1591 if (dlm_req == NULL) {
1592 CERROR ("can't unpack dlm_req\n");
1593 ldlm_callback_reply(req, -EPROTO);
1597 lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle[0]);
1599 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
1600 "disappeared\n", dlm_req->lock_handle[0].cookie);
1601 ldlm_callback_reply(req, -EINVAL);
1605 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
1606 lock_res_and_lock(lock);
1607 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
1608 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
1609 /* If somebody cancels the lock and the cache is already dropped,
1610 * we can tell the server we have no lock. Otherwise, we should
1611 * send the cancel after dropping the cache. */
1612 if ((lock->l_flags & LDLM_FL_CANCELING) &&
1613 (lock->l_flags & LDLM_FL_BL_DONE)) {
1614 LDLM_DEBUG(lock, "callback on lock "
1615 LPX64" - lock disappeared\n",
1616 dlm_req->lock_handle[0].cookie);
1617 unlock_res_and_lock(lock);
1618 LDLM_LOCK_PUT(lock);
1619 ldlm_callback_reply(req, -EINVAL);
1622 lock->l_flags |= LDLM_FL_BL_AST;
1624 unlock_res_and_lock(lock);
1626 /* We want the ost thread to get this reply so that it can respond
1627 * to ost requests (write cache writeback) that might be triggered
1630 * But we'd also like to be able to indicate in the reply that we're
1631 * cancelling right now, because it's unused, or have an intent result
1632 * in the reply, so we might have to push the responsibility for sending
1633 * the reply down into the AST handlers, alas. */
1635 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1636 case LDLM_BL_CALLBACK:
1637 CDEBUG(D_INODE, "blocking ast\n");
1638 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK))
1639 ldlm_callback_reply(req, 0);
1640 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
1641 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
1643 case LDLM_CP_CALLBACK:
1644 CDEBUG(D_INODE, "completion ast\n");
1645 ldlm_callback_reply(req, 0);
1646 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
1648 case LDLM_GL_CALLBACK:
1649 CDEBUG(D_INODE, "glimpse ast\n");
1650 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
1653 LBUG(); /* checked above */
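/* Request handler for the ldlm_canceld service: processes lock cancel requests
 * (and llog cancels) sent by clients. */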
1659 static int ldlm_cancel_handler(struct ptlrpc_request *req)
1664 /* Requests arrive in sender's byte order. The ptlrpc service
1665 * handler has already checked and, if necessary, byte-swapped the
1666 * incoming request message body, but I am responsible for the
1667 * message buffers. */
1669 if (req->rq_export == NULL) {
1670 struct ldlm_request *dlm_req;
1672 CERROR("operation %d from %s with bad export cookie "LPU64"\n",
1673 lustre_msg_get_opc(req->rq_reqmsg),
1674 libcfs_id2str(req->rq_peer),
1675 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1677 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1679 lustre_swab_ldlm_request);
1680 if (dlm_req != NULL)
1681 ldlm_lock_dump_handle(D_ERROR,
1682 &dlm_req->lock_handle[0]);
1683 ldlm_callback_reply(req, -ENOTCONN);
1687 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1689 /* XXX FIXME move this back to mds/handler.c, bug 249 */
1691 CDEBUG(D_INODE, "cancel\n");
1692 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
1694 rc = ldlm_handle_cancel(req);
1698 case OBD_LOG_CANCEL:
1699 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
1701 rc = llog_origin_handle_cancel(req);
1702 ldlm_callback_reply(req, rc);
1705 CERROR("invalid opcode %d\n",
1706 lustre_msg_get_opc(req->rq_reqmsg));
1707 ldlm_callback_reply(req, -EINVAL);
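/* Send blocking ASTs, with a lock descriptor pretending an exclusive lock is
 * wanted, for every granted plain/inodebits lock held by the given export. */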
1713 void ldlm_revoke_export_locks(struct obd_export *exp)
1715 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
1716 struct list_head rpc_list;
1717 struct ldlm_lock *lock, *next;
1718 struct ldlm_lock_desc desc;
1721 INIT_LIST_HEAD(&rpc_list);
1723 spin_lock(&exp->exp_ldlm_data.led_lock);
1724 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
1725 lock_res_and_lock(lock);
1727 if (lock->l_req_mode != lock->l_granted_mode) {
1728 unlock_res_and_lock(lock);
1732 LASSERT(lock->l_resource);
1733 if (lock->l_resource->lr_type != LDLM_IBITS &&
1734 lock->l_resource->lr_type != LDLM_PLAIN) {
1735 unlock_res_and_lock(lock);
1739 if (lock->l_flags & LDLM_FL_AST_SENT) {
1740 unlock_res_and_lock(lock);
1744 LASSERT(lock->l_blocking_ast);
1745 LASSERT(!lock->l_blocking_lock);
1747 lock->l_flags |= LDLM_FL_AST_SENT;
1748 list_move(&lock->l_export_chain, &rpc_list);
1750 unlock_res_and_lock(lock);
1752 spin_unlock(&exp->exp_ldlm_data.led_lock);
1754 while (!list_empty(&rpc_list)) {
1755 lock = list_entry(rpc_list.next, struct ldlm_lock,
1757 list_del_init(&lock->l_export_chain);
1759 /* the desc just pretends an exclusive lock is being requested */
1760 ldlm_lock2desc(lock, &desc);
1761 desc.l_req_mode = LCK_EX;
1762 desc.l_granted_mode = 0;
1764 LDLM_LOCK_GET(lock);
1765 lock->l_blocking_ast(lock, &desc, lock->l_ast_data,
1767 LDLM_LOCK_PUT(lock);
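/* Pick the next work item for a blocking-callback thread: prefer the priority
 * list, but service the regular list at least once every blp_num_threads
 * dequeues so it cannot starve. */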
1773 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
1775 struct ldlm_bl_work_item *blwi = NULL;
1776 static unsigned int num_bl = 0;
1778 spin_lock(&blp->blp_lock);
1779 /* process a request from blp_list at least once every blp_num_threads dequeues */
1780 if (!list_empty(&blp->blp_list) &&
1781 (list_empty(&blp->blp_prio_list) || num_bl == 0))
1782 blwi = list_entry(blp->blp_list.next,
1783 struct ldlm_bl_work_item, blwi_entry);
1785 if (!list_empty(&blp->blp_prio_list))
1786 blwi = list_entry(blp->blp_prio_list.next,
1787 struct ldlm_bl_work_item, blwi_entry);
1790 if (++num_bl >= atomic_read(&blp->blp_num_threads))
1792 list_del(&blwi->blwi_entry);
1794 spin_unlock(&blp->blp_lock);
1799 /* This only contains temporary data until the thread starts */
1800 struct ldlm_bl_thread_data {
1801 char bltd_name[CFS_CURPROC_COMM_MAX];
1802 struct ldlm_bl_pool *bltd_blp;
1803 struct completion bltd_comp;
1807 static int ldlm_bl_thread_main(void *arg);
1809 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
1811 struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
1814 init_completion(&bltd.bltd_comp);
1815 rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
1817 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
1818 atomic_read(&blp->blp_num_threads), rc);
1821 wait_for_completion(&bltd.bltd_comp);
1826 static int ldlm_bl_thread_main(void *arg)
1828 struct ldlm_bl_pool *blp;
1832 struct ldlm_bl_thread_data *bltd = arg;
1834 blp = bltd->bltd_blp;
1836 bltd->bltd_num = atomic_inc_return(&blp->blp_num_threads) - 1;
1837 atomic_inc(&blp->blp_busy_threads);
1839 snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
1840 "ldlm_bl_%02d", bltd->bltd_num);
1841 cfs_daemonize(bltd->bltd_name);
1843 complete(&bltd->bltd_comp);
1844 /* cannot use bltd after this, it is only on caller's stack */
1848 struct l_wait_info lwi = { 0 };
1849 struct ldlm_bl_work_item *blwi = NULL;
1851 blwi = ldlm_bl_get_work(blp);
1856 atomic_dec(&blp->blp_busy_threads);
1857 l_wait_event_exclusive(blp->blp_waitq,
1858 (blwi = ldlm_bl_get_work(blp)) != NULL,
1860 busy = atomic_inc_return(&blp->blp_busy_threads);
1862 if (blwi->blwi_ns == NULL)
1863 /* added by ldlm_cleanup() */
1866 /* Not fatal if this races and we end up with a few too many threads */
1867 if (unlikely(busy < blp->blp_max_threads &&
1868 busy >= atomic_read(&blp->blp_num_threads)))
1869 /* discard the return value, we tried */
1870 ldlm_bl_thread_start(blp);
1872 if (blwi->blwi_ns == NULL)
1873 /* added by ldlm_cleanup() */
1877 if (blwi->blwi_count) {
1878 /* This is the special case where we cancel LRU locks
1879 * asynchronously: the list of locks is passed in here, and
1880 * each lock is already marked LDLM_FL_CANCELING and
1881 * cancelled locally. */
1882 ldlm_cli_cancel_list(&blwi->blwi_head,
1883 blwi->blwi_count, NULL, 0, 0);
1885 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
1888 OBD_FREE(blwi, sizeof(*blwi));
1891 atomic_dec(&blp->blp_busy_threads);
1892 atomic_dec(&blp->blp_num_threads);
1893 complete(&blp->blp_comp);
1899 static int ldlm_setup(void);
1900 static int ldlm_cleanup(int force);
1902 int ldlm_get_ref(void)
1906 mutex_down(&ldlm_ref_sem);
1907 if (++ldlm_refcount == 1) {
1912 mutex_up(&ldlm_ref_sem);
1917 void ldlm_put_ref(int force)
1920 mutex_down(&ldlm_ref_sem);
1921 if (ldlm_refcount == 1) {
1922 int rc = ldlm_cleanup(force);
1924 CERROR("ldlm_cleanup failed: %d\n", rc);
1930 mutex_up(&ldlm_ref_sem);
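/* Bring up the LDLM: start the callback and cancel ptlrpc services, the
 * blocking-callback thread pool, the expired-lock thread and the lock pools. */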
1935 static int ldlm_setup(void)
1937 struct ldlm_bl_pool *blp;
1939 int ldlm_min_threads = LDLM_THREADS_AUTO_MIN;
1940 int ldlm_max_threads = LDLM_THREADS_AUTO_MAX;
1946 if (ldlm_state != NULL)
1949 OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
1950 if (ldlm_state == NULL)
1954 rc = ldlm_proc_setup();
1960 if (ldlm_num_threads) {
1961 /* If ldlm_num_threads is set, it is the min and the max. */
1962 if (ldlm_num_threads > LDLM_THREADS_AUTO_MAX)
1963 ldlm_num_threads = LDLM_THREADS_AUTO_MAX;
1964 if (ldlm_num_threads < LDLM_THREADS_AUTO_MIN)
1965 ldlm_num_threads = LDLM_THREADS_AUTO_MIN;
1966 ldlm_min_threads = ldlm_max_threads = ldlm_num_threads;
1970 ldlm_state->ldlm_cb_service =
1971 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1972 LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
1973 LDLM_CB_REPLY_PORTAL, ldlm_timeout * 900,
1974 ldlm_callback_handler, "ldlm_cbd",
1975 ldlm_svc_proc_dir, NULL,
1976 ldlm_min_threads, ldlm_max_threads,
1978 LCT_MD_THREAD|LCT_DT_THREAD);
1980 if (!ldlm_state->ldlm_cb_service) {
1981 CERROR("failed to start service\n");
1982 GOTO(out_proc, rc = -ENOMEM);
1985 ldlm_state->ldlm_cancel_service =
1986 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1987 LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
1988 LDLM_CANCEL_REPLY_PORTAL, ldlm_timeout * 6000,
1989 ldlm_cancel_handler, "ldlm_canceld",
1990 ldlm_svc_proc_dir, NULL,
1991 ldlm_min_threads, ldlm_max_threads,
1993 LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD);
1995 if (!ldlm_state->ldlm_cancel_service) {
1996 CERROR("failed to start service\n");
1997 GOTO(out_proc, rc = -ENOMEM);
2000 OBD_ALLOC(blp, sizeof(*blp));
2002 GOTO(out_proc, rc = -ENOMEM);
2003 ldlm_state->ldlm_bl_pool = blp;
2005 spin_lock_init(&blp->blp_lock);
2006 CFS_INIT_LIST_HEAD(&blp->blp_list);
2007 CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
2008 cfs_waitq_init(&blp->blp_waitq);
2009 atomic_set(&blp->blp_num_threads, 0);
2010 atomic_set(&blp->blp_busy_threads, 0);
2011 blp->blp_min_threads = ldlm_min_threads;
2012 blp->blp_max_threads = ldlm_max_threads;
2015 for (i = 0; i < blp->blp_min_threads; i++) {
2016 rc = ldlm_bl_thread_start(blp);
2018 GOTO(out_thread, rc);
2021 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cancel_service);
2023 GOTO(out_thread, rc);
2025 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cb_service);
2027 GOTO(out_thread, rc);
2029 CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
2030 expired_lock_thread.elt_state = ELT_STOPPED;
2031 cfs_waitq_init(&expired_lock_thread.elt_waitq);
2033 CFS_INIT_LIST_HEAD(&waiting_locks_list);
2034 spin_lock_init(&waiting_locks_spinlock);
2035 cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
2037 rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
2039 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
2040 GOTO(out_thread, rc);
2043 wait_event(expired_lock_thread.elt_waitq,
2044 expired_lock_thread.elt_state == ELT_READY);
2048 rc = ldlm_pools_init();
2050 GOTO(out_thread, rc);
2056 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2057 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2062 ldlm_proc_cleanup();
2065 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
2070 static int ldlm_cleanup(int force)
2073 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
2077 if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
2078 !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
2079 CERROR("ldlm still has namespaces; clean these up first.\n");
2080 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
2081 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
2090 while (atomic_read(&blp->blp_num_threads) > 0) {
2091 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
2093 init_completion(&blp->blp_comp);
2095 spin_lock(&blp->blp_lock);
2096 list_add_tail(&blwi.blwi_entry, &blp->blp_list);
2097 cfs_waitq_signal(&blp->blp_waitq);
2098 spin_unlock(&blp->blp_lock);
2100 wait_for_completion(&blp->blp_comp);
2102 OBD_FREE(blp, sizeof(*blp));
2104 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2105 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2106 ldlm_proc_cleanup();
2108 expired_lock_thread.elt_state = ELT_TERMINATE;
2109 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
2110 wait_event(expired_lock_thread.elt_waitq,
2111 expired_lock_thread.elt_state == ELT_STOPPED);
2113 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
2114 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
2117 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
2123 int __init ldlm_init(void)
2125 init_mutex(&ldlm_ref_sem);
2126 init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
2127 init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
2128 ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
2129 sizeof(struct ldlm_resource), 0,
2130 SLAB_HWCACHE_ALIGN);
2131 if (ldlm_resource_slab == NULL)
2134 ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
2135 sizeof(struct ldlm_lock), 0,
2136 SLAB_HWCACHE_ALIGN);
2137 if (ldlm_lock_slab == NULL) {
2138 cfs_mem_cache_destroy(ldlm_resource_slab);
2145 void __exit ldlm_exit(void)
2149 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
2150 rc = cfs_mem_cache_destroy(ldlm_resource_slab);
2151 LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
2152 rc = cfs_mem_cache_destroy(ldlm_lock_slab);
2153 LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
2157 EXPORT_SYMBOL(ldlm_extent_shift_kms);
2160 EXPORT_SYMBOL(ldlm_get_processing_policy);
2161 EXPORT_SYMBOL(ldlm_lock2desc);
2162 EXPORT_SYMBOL(ldlm_register_intent);
2163 EXPORT_SYMBOL(ldlm_lockname);
2164 EXPORT_SYMBOL(ldlm_typename);
2165 EXPORT_SYMBOL(ldlm_lock2handle);
2166 EXPORT_SYMBOL(__ldlm_handle2lock);
2167 EXPORT_SYMBOL(ldlm_lock_get);
2168 EXPORT_SYMBOL(ldlm_lock_put);
2169 EXPORT_SYMBOL(ldlm_lock_match);
2170 EXPORT_SYMBOL(ldlm_lock_cancel);
2171 EXPORT_SYMBOL(ldlm_lock_addref);
2172 EXPORT_SYMBOL(ldlm_lock_decref);
2173 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
2174 EXPORT_SYMBOL(ldlm_lock_change_resource);
2175 EXPORT_SYMBOL(ldlm_lock_set_data);
2176 EXPORT_SYMBOL(ldlm_it2str);
2177 EXPORT_SYMBOL(ldlm_lock_dump);
2178 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2179 EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
2180 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
2181 EXPORT_SYMBOL(ldlm_lock_allow_match);
2183 /* ldlm_request.c */
2184 EXPORT_SYMBOL(ldlm_completion_ast);
2185 EXPORT_SYMBOL(ldlm_blocking_ast);
2186 EXPORT_SYMBOL(ldlm_glimpse_ast);
2187 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2188 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2189 EXPORT_SYMBOL(ldlm_prep_elc_req);
2190 EXPORT_SYMBOL(ldlm_cli_convert);
2191 EXPORT_SYMBOL(ldlm_cli_enqueue);
2192 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2193 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2194 EXPORT_SYMBOL(ldlm_cli_cancel);
2195 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2196 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2197 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2198 EXPORT_SYMBOL(ldlm_cli_join_lru);
2199 EXPORT_SYMBOL(ldlm_replay_locks);
2200 EXPORT_SYMBOL(ldlm_resource_foreach);
2201 EXPORT_SYMBOL(ldlm_namespace_foreach);
2202 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
2203 EXPORT_SYMBOL(ldlm_resource_iterate);
2204 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2205 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2208 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2209 EXPORT_SYMBOL(ldlm_server_completion_ast);
2210 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2211 EXPORT_SYMBOL(ldlm_handle_enqueue);
2212 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2213 EXPORT_SYMBOL(ldlm_handle_cancel);
2214 EXPORT_SYMBOL(ldlm_request_cancel);
2215 EXPORT_SYMBOL(ldlm_handle_convert);
2216 EXPORT_SYMBOL(ldlm_handle_convert0);
2217 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2218 EXPORT_SYMBOL(ldlm_get_ref);
2219 EXPORT_SYMBOL(ldlm_put_ref);
2220 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2221 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2223 /* ldlm_resource.c */
2224 EXPORT_SYMBOL(ldlm_namespace_new);
2225 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2226 EXPORT_SYMBOL(ldlm_namespace_free);
2227 EXPORT_SYMBOL(ldlm_namespace_dump);
2228 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2229 EXPORT_SYMBOL(ldlm_resource_get);
2230 EXPORT_SYMBOL(ldlm_resource_putref);
2231 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
2234 EXPORT_SYMBOL(client_import_add_conn);
2235 EXPORT_SYMBOL(client_import_del_conn);
2236 EXPORT_SYMBOL(client_obd_setup);
2237 EXPORT_SYMBOL(client_obd_cleanup);
2238 EXPORT_SYMBOL(client_connect_import);
2239 EXPORT_SYMBOL(client_disconnect_export);
2240 EXPORT_SYMBOL(target_start_recovery_thread);
2241 EXPORT_SYMBOL(target_stop_recovery_thread);
2242 EXPORT_SYMBOL(target_handle_connect);
2243 EXPORT_SYMBOL(target_cleanup_recovery);
2244 EXPORT_SYMBOL(target_destroy_export);
2245 EXPORT_SYMBOL(target_cancel_recovery_timer);
2246 EXPORT_SYMBOL(target_send_reply);
2247 EXPORT_SYMBOL(target_queue_recovery_request);
2248 EXPORT_SYMBOL(target_handle_ping);
2249 EXPORT_SYMBOL(target_pack_pool_reply);
2250 EXPORT_SYMBOL(target_handle_disconnect);
2253 EXPORT_SYMBOL(lock_res_and_lock);
2254 EXPORT_SYMBOL(unlock_res_and_lock);