1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002-2004 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of the Lustre file system, http://www.lustre.org
9 * Lustre is a trademark of Cluster File Systems, Inc.
11 * You may have signed or agreed to another license before downloading
12 * this software. If so, you are bound by the terms and conditions
13 * of that agreement, and the following does not apply to you. See the
14 * LICENSE file included with this distribution for more information.
16 * If you did not agree to a different license, then this copy of Lustre
17 * is open source software; you can redistribute it and/or modify it
18 * under the terms of version 2 of the GNU General Public License as
19 * published by the Free Software Foundation.
21 * In either case, Lustre is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * license text for more details.
28 # define EXPORT_SYMTAB
30 #define DEBUG_SUBSYSTEM S_LDLM
33 # include <libcfs/libcfs.h>
35 # include <liblustre.h>
38 #include <lustre_dlm.h>
39 #include <obd_class.h>
40 #include <libcfs/list.h>
41 #include "ldlm_internal.h"
43 extern cfs_mem_cache_t *ldlm_resource_slab;
44 extern cfs_mem_cache_t *ldlm_lock_slab;
45 extern struct list_head ldlm_namespace_list;
47 extern struct semaphore ldlm_namespace_lock;
48 static struct semaphore ldlm_ref_sem;
49 static int ldlm_refcount;
53 static struct ldlm_state *ldlm_state;
55 inline cfs_time_t round_timeout(cfs_time_t timeout)
57 return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
60 /* timeout for initial callback (AST) reply */
61 static inline unsigned int ldlm_get_rq_timeout(unsigned int ldlm_timeout,
62 unsigned int obd_timeout)
64 unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
66 return timeout < 1 ? 1 : timeout;
70 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
71 static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
72 static struct list_head waiting_locks_list;
73 static cfs_timer_t waiting_locks_timer;
75 static struct expired_lock_thread {
76 cfs_waitq_t elt_waitq;
79 struct list_head elt_expired_locks;
80 } expired_lock_thread;
85 #define ELT_TERMINATE 2
89 struct list_head blp_list;
90 cfs_waitq_t blp_waitq;
91 atomic_t blp_num_threads;
92 struct completion blp_comp;
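/* A unit of work for the blocking-AST threads: either a single lock with
 * its blocking descriptor (blwi_ld/blwi_lock), or a list of locks to be
 * cancelled on the server (blwi_head/blwi_count). */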
95 struct ldlm_bl_work_item {
96 struct list_head blwi_entry;
97 struct ldlm_namespace *blwi_ns;
98 struct ldlm_lock_desc blwi_ld;
99 struct ldlm_lock *blwi_lock;
100 struct list_head blwi_head;
106 static inline int have_expired_locks(void)
111 spin_lock_bh(&waiting_locks_spinlock);
112 need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
113 spin_unlock_bh(&waiting_locks_spinlock);
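/* Body of the "ldlm_elt" thread: wait until locks appear on
 * elt_expired_locks (or ELT_TERMINATE is set), optionally dump the debug
 * log, and evict each export whose client failed to answer its AST in
 * time via class_fail_export(). */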
118 static int expired_lock_main(void *arg)
120 struct list_head *expired = &expired_lock_thread.elt_expired_locks;
121 struct l_wait_info lwi = { 0 };
125 cfs_daemonize("ldlm_elt");
127 expired_lock_thread.elt_state = ELT_READY;
128 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
131 l_wait_event(expired_lock_thread.elt_waitq,
132 have_expired_locks() ||
133 expired_lock_thread.elt_state == ELT_TERMINATE,
136 spin_lock_bh(&waiting_locks_spinlock);
137 if (expired_lock_thread.elt_dump) {
138 spin_unlock_bh(&waiting_locks_spinlock);
140 /* from waiting_locks_callback, but not in timer */
141 libcfs_debug_dumplog();
142 libcfs_run_lbug_upcall(__FILE__,
143 "waiting_locks_callback",
144 expired_lock_thread.elt_dump);
146 spin_lock_bh(&waiting_locks_spinlock);
147 expired_lock_thread.elt_dump = 0;
152 while (!list_empty(expired)) {
153 struct obd_export *export;
154 struct ldlm_lock *lock;
156 lock = list_entry(expired->next, struct ldlm_lock,
158 if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
159 (void *)lock >= LP_POISON) {
160 spin_unlock_bh(&waiting_locks_spinlock);
161 CERROR("free lock on elt list %p\n", lock);
164 list_del_init(&lock->l_pending_chain);
165 if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
166 (void *)lock->l_export >= LP_POISON) {
167 CERROR("lock with free export on elt list %p\n",
169 lock->l_export = NULL;
170 LDLM_ERROR(lock, "free export");
173 export = class_export_get(lock->l_export);
174 spin_unlock_bh(&waiting_locks_spinlock);
177 class_fail_export(export);
178 class_export_put(export);
179 spin_lock_bh(&waiting_locks_spinlock);
181 spin_unlock_bh(&waiting_locks_spinlock);
183 if (do_dump && obd_dump_on_eviction) {
184 CERROR("dump the log upon eviction\n");
185 libcfs_debug_dumplog();
188 if (expired_lock_thread.elt_state == ELT_TERMINATE)
192 expired_lock_thread.elt_state = ELT_STOPPED;
193 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
197 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
199 /* This is called from within a timer interrupt and cannot schedule */
200 static void waiting_locks_callback(unsigned long unused)
202 struct ldlm_lock *lock, *last = NULL;
205 spin_lock_bh(&waiting_locks_spinlock);
206 while (!list_empty(&waiting_locks_list)) {
207 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
210 if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
211 (lock->l_req_mode == LCK_GROUP))
214 if (ptlrpc_check_suspend()) {
215 /* There is a case when we talk to one mds while holding
216 * a lock from another mds. This way we can easily get
217 * here if the second mds is being recovered, so we
218 * suspend timeouts. bug 6019 */
220 LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
221 lock->l_export->exp_client_uuid.uuid,
222 lock->l_export->exp_connection->c_remote_uuid.uuid,
223 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
225 list_del_init(&lock->l_pending_chain);
226 spin_unlock_bh(&waiting_locks_spinlock);
227 ldlm_add_waiting_lock(lock);
231 /* If the timeout overlaps the activation time of suspended timeouts,
232 * extend it to give the client a chance to reconnect. */
233 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
234 cfs_time_seconds(obd_timeout)/2),
235 ptlrpc_suspend_wakeup_time())) {
236 LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
237 lock->l_export->exp_client_uuid.uuid,
238 lock->l_export->exp_connection->c_remote_uuid.uuid,
239 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
241 list_del_init(&lock->l_pending_chain);
242 spin_unlock_bh(&waiting_locks_spinlock);
243 ldlm_add_waiting_lock(lock);
247 LDLM_ERROR(lock, "lock callback timer expired: evicting client "
249 lock->l_export->exp_client_uuid.uuid,
250 lock->l_export->exp_connection->c_remote_uuid.uuid,
251 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
255 list_del(&lock->l_pending_chain);
256 list_add(&lock->l_pending_chain,
257 &expired_lock_thread.elt_expired_locks);
260 if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
261 if (obd_dump_on_timeout)
262 expired_lock_thread.elt_dump = __LINE__;
264 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
268 * Make sure the timer will fire again if we have any locks
271 if (!list_empty(&waiting_locks_list)) {
272 cfs_time_t timeout_rounded;
273 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
275 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
276 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
278 spin_unlock_bh(&waiting_locks_spinlock);
282 * Indicate that we're waiting for a client to call us back cancelling a given
283 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
284 * timer to fire appropriately. (We round up to the next second, to avoid
285 * floods of timer firings during periods of high lock contention and traffic).
287 * Called with the namespace lock held.
289 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock)
291 cfs_time_t timeout_rounded;
293 if (!list_empty(&lock->l_pending_chain))
296 lock->l_callback_timeout = cfs_time_add(cfs_time_current(),
297 cfs_time_seconds(obd_timeout)/2);
299 timeout_rounded = round_timeout(lock->l_callback_timeout);
301 if (cfs_time_before(timeout_rounded, cfs_timer_deadline(&waiting_locks_timer)) ||
302 !cfs_timer_is_armed(&waiting_locks_timer)) {
303 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
306 list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
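/* Locked wrapper around __ldlm_add_waiting_lock(): refuses to queue a
 * destroyed lock (bug 5653) and reports whether the lock was newly added
 * to the wait list or was already on it. */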
310 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
314 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
316 spin_lock_bh(&waiting_locks_spinlock);
317 if (lock->l_destroyed) {
318 static cfs_time_t next;
319 spin_unlock_bh(&waiting_locks_spinlock);
320 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
321 if (cfs_time_after(cfs_time_current(), next)) {
322 next = cfs_time_shift(14400);
323 libcfs_debug_dumpstack(NULL);
328 ret = __ldlm_add_waiting_lock(lock);
329 spin_unlock_bh(&waiting_locks_spinlock);
331 LDLM_DEBUG(lock, "%sadding to wait list",
332 ret == 0 ? "not re-" : "");
337 * Remove a lock from the pending list, likely because it had its cancellation
338 * callback arrive without incident. This adjusts the lock-timeout timer if
339 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
341 * Called with namespace lock held.
343 int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
345 struct list_head *list_next;
347 if (list_empty(&lock->l_pending_chain))
350 list_next = lock->l_pending_chain.next;
351 if (lock->l_pending_chain.prev == &waiting_locks_list) {
352 /* Removing the head of the list, adjust timer. */
353 if (list_next == &waiting_locks_list) {
354 /* No more, just cancel. */
355 cfs_timer_disarm(&waiting_locks_timer);
357 struct ldlm_lock *next;
358 next = list_entry(list_next, struct ldlm_lock,
360 cfs_timer_arm(&waiting_locks_timer,
361 round_timeout(next->l_callback_timeout));
364 list_del_init(&lock->l_pending_chain);
369 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
373 if (lock->l_export == NULL) {
374 /* We don't have a "waiting locks list" on clients. */
375 LDLM_DEBUG(lock, "client lock: no-op");
379 spin_lock_bh(&waiting_locks_spinlock);
380 ret = __ldlm_del_waiting_lock(lock);
381 spin_unlock_bh(&waiting_locks_spinlock);
383 LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
390 * Called with namespace lock held.
392 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
394 if (lock->l_export == NULL) {
395 /* We don't have a "waiting locks list" on clients. */
396 LDLM_DEBUG(lock, "client lock: no-op");
400 spin_lock_bh(&waiting_locks_spinlock);
402 if (list_empty(&lock->l_pending_chain)) {
403 spin_unlock_bh(&waiting_locks_spinlock);
404 LDLM_DEBUG(lock, "wasn't waiting");
408 __ldlm_del_waiting_lock(lock);
409 __ldlm_add_waiting_lock(lock);
410 spin_unlock_bh(&waiting_locks_spinlock);
412 LDLM_DEBUG(lock, "refreshed");
416 #else /* !__KERNEL__ */
418 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
420 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
424 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
429 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
433 #endif /* __KERNEL__ */
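/* A client failed to respond to an AST (blocking, completion or glimpse):
 * report it on the console, optionally dump the debug log, and evict the
 * export with class_fail_export(). */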
435 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
436 const char *ast_type)
438 struct ptlrpc_connection *conn = lock->l_export->exp_connection;
439 char *str = libcfs_nid2str(conn->c_peer.nid);
441 LCONSOLE_ERROR_MSG(0x138, "A client on nid %s was evicted from "
442 "service %s.\n", str,
443 lock->l_export->exp_obd->obd_name);
445 LCONSOLE_ERROR_MSG(0x012, "Lock %s callback to %s timed out for "
446 "resource %d\n", ast_type,
447 obd_export_nid2str(lock->l_export), rc);
449 if (obd_dump_on_timeout)
450 libcfs_debug_dumplog();
451 class_fail_export(lock->l_export);
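/* Decide what to do when sending an AST returned an error: timeouts and
 * lost connections evict the client (except for liblustre clients or
 * locks that were cancelled while the AST was in flight, which are just
 * cancelled locally); other errors cancel the lock and ask the caller to
 * restart its reprocessing. */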
454 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
455 struct ptlrpc_request *req, int rc,
456 const char *ast_type)
458 lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
460 if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
461 LASSERT(lock->l_export);
462 if (lock->l_export->exp_libclient) {
463 LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
464 " timeout, just cancelling lock", ast_type,
465 libcfs_nid2str(peer.nid));
466 ldlm_lock_cancel(lock);
468 } else if (lock->l_flags & LDLM_FL_CANCEL) {
469 LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
470 "cancel was received (AST reply lost?)",
471 ast_type, libcfs_nid2str(peer.nid));
472 ldlm_lock_cancel(lock);
475 ldlm_del_waiting_lock(lock);
476 ldlm_failed_ast(lock, rc, ast_type);
480 LDLM_DEBUG(lock, "client (nid %s) returned %d"
481 " from %s AST - normal race",
482 libcfs_nid2str(peer.nid),
484 lustre_msg_get_status(req->rq_repmsg) : -1,
487 LDLM_ERROR(lock, "client (nid %s) returned %d "
488 "from %s AST", libcfs_nid2str(peer.nid),
489 (req->rq_repmsg != NULL) ?
490 lustre_msg_get_status(req->rq_repmsg) : 0,
492 ldlm_lock_cancel(lock);
493 /* Server-side AST functions are called from ldlm_reprocess_all,
494 * which needs to be told to please restart its reprocessing. */
502 * ->l_blocking_ast() method for server-side locks. This is invoked when a newly
503 * enqueued server lock conflicts with the given one.
505 * Sends a blocking AST RPC to the client owning that lock and arms the timeout
506 * timer to wait for the client's response.
508 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
509 struct ldlm_lock_desc *desc,
510 void *data, int flag)
512 struct ldlm_request *body;
513 struct ptlrpc_request *req;
514 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
515 [DLM_LOCKREQ_OFF] = sizeof(*body) };
516 int instant_cancel = 0, rc = 0;
519 if (flag == LDLM_CB_CANCELING) {
520 /* Don't need to do anything here. */
525 if (lock->l_export->exp_obd->obd_recovering != 0) {
526 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
527 ldlm_lock_dump(D_ERROR, lock, 0);
530 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
531 LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK, 2, size,
536 lock_res(lock->l_resource);
537 if (lock->l_granted_mode != lock->l_req_mode) {
538 /* this blocking AST will be communicated as part of the
539 * completion AST instead */
540 unlock_res(lock->l_resource);
541 ptlrpc_req_finished(req);
542 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
546 if (lock->l_destroyed) {
547 /* What's the point? */
548 unlock_res(lock->l_resource);
549 ptlrpc_req_finished(req);
553 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
556 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
557 body->lock_handle[0] = lock->l_remote_handle;
558 body->lock_desc = *desc;
559 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
561 LDLM_DEBUG(lock, "server preparing blocking AST");
563 ptlrpc_req_set_repsize(req, 1, NULL);
564 if (instant_cancel) {
565 unlock_res(lock->l_resource);
566 ldlm_lock_cancel(lock);
568 LASSERT(lock->l_granted_mode == lock->l_req_mode);
569 ldlm_add_waiting_lock(lock);
570 unlock_res(lock->l_resource);
573 req->rq_send_state = LUSTRE_IMP_FULL;
574 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
576 if (lock->l_export && lock->l_export->exp_ldlm_stats)
577 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
578 LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
580 if (unlikely(instant_cancel)) {
581 rc = ptl_send_rpc(req, 1);
583 rc = ptlrpc_queue_wait(req);
584 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
587 /* If the client cancelled the lock but the cancel has not been
588 * received yet, we need to update the lvbo to have the proper
589 * attributes cached. */
591 ldlm_res_lvbo_update(lock->l_resource, NULL, 0, 1);
592 rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
595 ptlrpc_req_finished(req);
597 /* If we cancelled the lock, we need to restart ldlm_reprocess_queue */
598 if (!rc && instant_cancel)
604 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
606 struct ldlm_request *body;
607 struct ptlrpc_request *req;
608 struct timeval granted_time;
609 long total_enqueue_wait;
610 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
611 [DLM_LOCKREQ_OFF] = sizeof(*body) };
612 int rc = 0, buffers = 2, instant_cancel = 0;
615 LASSERT(lock != NULL);
617 do_gettimeofday(&granted_time);
618 total_enqueue_wait = cfs_timeval_sub(&granted_time,
619 &lock->l_enqueued_time, NULL);
621 if (total_enqueue_wait / 1000000 > obd_timeout)
622 LDLM_ERROR(lock, "enqueue wait took %luus from %lu",
623 total_enqueue_wait, lock->l_enqueued_time.tv_sec);
625 lock_res_and_lock(lock);
626 if (lock->l_resource->lr_lvb_len) {
627 size[DLM_REQ_REC_OFF] = lock->l_resource->lr_lvb_len;
630 unlock_res_and_lock(lock);
632 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
633 LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK, buffers,
638 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
639 body->lock_handle[0] = lock->l_remote_handle;
640 body->lock_flags = flags;
641 ldlm_lock2desc(lock, &body->lock_desc);
646 lvb = lustre_msg_buf(req->rq_reqmsg, DLM_REQ_REC_OFF,
647 lock->l_resource->lr_lvb_len);
648 lock_res_and_lock(lock);
649 memcpy(lvb, lock->l_resource->lr_lvb_data,
650 lock->l_resource->lr_lvb_len);
651 unlock_res_and_lock(lock);
654 LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
657 ptlrpc_req_set_repsize(req, 1, NULL);
659 req->rq_send_state = LUSTRE_IMP_FULL;
660 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
662 /* We only send real blocking ASTs after the lock is granted */
663 lock_res_and_lock(lock);
664 if (lock->l_flags & LDLM_FL_AST_SENT) {
665 body->lock_flags |= LDLM_FL_AST_SENT;
667 /* We might get here before ldlm_handle_enqueue has set the
668 * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
669 * on the waiting list, but this is safe: similar code in
670 * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
671 * which not only cancels the lock but also removes it
672 * from the waiting list. */
673 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
674 unlock_res_and_lock(lock);
675 ldlm_lock_cancel(lock);
677 lock_res_and_lock(lock);
679 /* start the lock-timeout clock */
680 ldlm_add_waiting_lock(lock);
683 unlock_res_and_lock(lock);
685 if (lock->l_export && lock->l_export->exp_ldlm_stats)
686 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
687 LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
689 rc = ptlrpc_queue_wait(req);
691 rc = ldlm_handle_ast_error(lock, req, rc, "completion");
693 ptlrpc_req_finished(req);
695 /* If we cancelled the lock, we need to restart ldlm_reprocess_queue */
696 if (!rc && instant_cancel)
702 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
704 struct ldlm_resource *res = lock->l_resource;
705 struct ldlm_request *body;
706 struct ptlrpc_request *req;
707 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
708 [DLM_LOCKREQ_OFF] = sizeof(*body) };
712 LASSERT(lock != NULL);
714 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
715 LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK, 2, size,
720 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
721 body->lock_handle[0] = lock->l_remote_handle;
722 ldlm_lock2desc(lock, &body->lock_desc);
724 lock_res_and_lock(lock);
725 size[REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
726 unlock_res_and_lock(lock);
727 res = lock->l_resource;
728 ptlrpc_req_set_repsize(req, 2, size);
730 req->rq_send_state = LUSTRE_IMP_FULL;
731 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
733 if (lock->l_export && lock->l_export->exp_ldlm_stats)
734 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
735 LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
737 rc = ptlrpc_queue_wait(req);
738 if (rc == -ELDLM_NO_LOCK_DATA)
739 LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
741 rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
743 rc = ldlm_res_lvbo_update(res, req->rq_repmsg,
745 ptlrpc_req_finished(req);
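/* Walk the export's list of held locks looking for one whose remote
 * handle matches remote_hdl; used on LDLM_FL_REPLAY to find the lock
 * being replayed instead of creating a duplicate. */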
749 static struct ldlm_lock *
750 find_existing_lock(struct obd_export *exp,
751 const struct lustre_handle *remote_hdl)
753 struct list_head *iter;
755 spin_lock(&exp->exp_ldlm_data.led_lock);
756 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
757 struct ldlm_lock *lock;
758 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
759 if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
761 spin_unlock(&exp->exp_ldlm_data.led_lock);
765 spin_unlock(&exp->exp_ldlm_data.led_lock);
770 extern unsigned long long lu_time_stamp_get(void);
772 #define lu_time_stamp_get() time(NULL)
776 * Main server-side entry point into LDLM. This is called by ptlrpc service
777 * threads to carry out client lock enqueueing requests.
779 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
780 struct ptlrpc_request *req,
781 const struct ldlm_request *dlm_req,
782 const struct ldlm_callback_suite *cbs)
784 struct ldlm_reply *dlm_rep;
785 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
786 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
789 ldlm_error_t err = ELDLM_OK;
790 struct ldlm_lock *lock = NULL;
794 LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
796 ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
797 flags = dlm_req->lock_flags;
799 LASSERT(req->rq_export);
801 if (req->rq_export->exp_ldlm_stats)
802 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
803 LDLM_ENQUEUE - LDLM_FIRST_OPC);
805 if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
806 dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
807 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
808 dlm_req->lock_desc.l_resource.lr_type);
809 GOTO(out, rc = -EFAULT);
812 if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
813 dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
814 dlm_req->lock_desc.l_req_mode &
815 (dlm_req->lock_desc.l_req_mode-1))) {
816 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
817 dlm_req->lock_desc.l_req_mode);
818 GOTO(out, rc = -EFAULT);
821 if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
822 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
824 DEBUG_REQ(D_ERROR, req,
825 "PLAIN lock request from IBITS client?");
826 GOTO(out, rc = -EPROTO);
828 } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
830 DEBUG_REQ(D_ERROR, req,
831 "IBITS lock request from unaware client?");
832 GOTO(out, rc = -EPROTO);
836 /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
837 against server's _CONNECT_SUPPORTED flags? (I don't want to use
838 ibits for mgc/mgs) */
840 /* INODEBITS_INTEROP: Perform conversion from plain lock to
841 * inodebits lock if client does not support them. */
842 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
843 (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
844 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
845 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
846 MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
847 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
848 dlm_req->lock_desc.l_req_mode = LCK_CR;
852 if (unlikely(flags & LDLM_FL_REPLAY)) {
853 lock = find_existing_lock(req->rq_export,
854 &dlm_req->lock_handle[0]);
856 DEBUG_REQ(D_HA, req, "found existing lock cookie "LPX64,
857 lock->l_handle.h_cookie);
858 GOTO(existing_lock, rc = 0);
862 /* The lock's callback data might be set in the policy function */
863 lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
864 dlm_req->lock_desc.l_resource.lr_type,
865 dlm_req->lock_desc.l_req_mode,
866 cbs->lcs_blocking, cbs->lcs_completion,
867 cbs->lcs_glimpse, NULL, 0);
870 GOTO(out, rc = -ENOMEM);
872 do_gettimeofday(&lock->l_enqueued_time);
873 lock->l_remote_handle = dlm_req->lock_handle[0];
874 LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
876 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
877 /* Don't enqueue a lock onto the export if it has already
878 * been evicted. Cancel it now instead. (bug 3822) */
879 if (req->rq_export->exp_failed) {
880 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
881 GOTO(out, rc = -ENOTCONN);
883 lock->l_export = class_export_get(req->rq_export);
884 spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
885 list_add(&lock->l_export_chain,
886 &lock->l_export->exp_ldlm_data.led_held_locks);
887 spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
891 if (flags & LDLM_FL_HAS_INTENT) {
892 /* In this case, the reply buffer is allocated deep in
893 * local_lock_enqueue by the policy function. */
898 lock_res_and_lock(lock);
899 if (lock->l_resource->lr_lvb_len) {
900 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
903 unlock_res_and_lock(lock);
905 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
906 GOTO(out, rc = -ENOMEM);
908 rc = lustre_pack_reply(req, buffers, size, NULL);
913 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
914 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
915 if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
916 lock->l_req_extent = lock->l_policy_data.l_extent;
918 err = ldlm_lock_enqueue(ns, &lock, cookie, &flags);
922 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
924 dlm_rep->lock_flags = flags;
926 ldlm_lock2desc(lock, &dlm_rep->lock_desc);
927 ldlm_lock2handle(lock, &dlm_rep->lock_handle);
929 /* We never send a blocking AST until the lock is granted, but
930 * we can tell it right now */
931 lock_res_and_lock(lock);
933 /* Now take into account flags to be inherited from the original lock
934 request, both in the reply to the client and in our own lock flags. */
935 dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
936 lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
938 /* Don't move a pending lock onto the export if it has already
939 * been evicted. Cancel it now instead. (bug 5683) */
940 if (unlikely(req->rq_export->exp_failed ||
941 OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
942 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
944 } else if (lock->l_flags & LDLM_FL_AST_SENT) {
945 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
946 if (lock->l_granted_mode == lock->l_req_mode) {
948 * Only cancel the lock if it was granted, because otherwise it would
949 * be destroyed immediately and would never be granted
950 * in the future, causing timeouts on the client. An
951 * ungranted lock will be cancelled immediately after
952 * the completion AST is sent.
954 if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
955 unlock_res_and_lock(lock);
956 ldlm_lock_cancel(lock);
957 lock_res_and_lock(lock);
959 ldlm_add_waiting_lock(lock);
962 /* Make sure we never ever grant usual metadata locks to liblustre
964 if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
965 dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
966 req->rq_export->exp_libclient) {
967 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
968 !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
969 CERROR("Granting sync lock to libclient. "
970 "req fl %d, rep fl %d, lock fl %d\n",
971 dlm_req->lock_flags, dlm_rep->lock_flags,
973 LDLM_ERROR(lock, "sync lock");
974 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
975 struct ldlm_intent *it;
976 it = lustre_msg_buf(req->rq_reqmsg,
980 CERROR("This is intent %s ("LPU64")\n",
981 ldlm_it2str(it->opc), it->opc);
987 unlock_res_and_lock(lock);
991 req->rq_status = err;
992 if (req->rq_reply_state == NULL) {
993 err = lustre_pack_reply(req, 1, NULL, NULL);
999 /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
1000 * ldlm_reprocess_all. If this moves, revisit that code. -phil */
1002 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
1003 "(err=%d, rc=%d)", err, rc);
1005 lock_res_and_lock(lock);
1007 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
1008 if (size[DLM_REPLY_REC_OFF] > 0) {
1009 void *lvb = lustre_msg_buf(req->rq_repmsg,
1011 size[DLM_REPLY_REC_OFF]);
1012 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1015 memcpy(lvb, lock->l_resource->lr_lvb_data,
1016 size[DLM_REPLY_REC_OFF]);
1019 ldlm_resource_unlink_lock(lock);
1020 ldlm_lock_destroy_nolock(lock);
1022 unlock_res_and_lock(lock);
1024 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1025 ldlm_reprocess_all(lock->l_resource);
1027 LDLM_LOCK_PUT(lock);
1030 LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1036 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1037 ldlm_completion_callback completion_callback,
1038 ldlm_blocking_callback blocking_callback,
1039 ldlm_glimpse_callback glimpse_callback)
1042 struct ldlm_request *dlm_req;
1043 struct ldlm_callback_suite cbs = {
1044 .lcs_completion = completion_callback,
1045 .lcs_blocking = blocking_callback,
1046 .lcs_glimpse = glimpse_callback
1050 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1051 sizeof *dlm_req, lustre_swab_ldlm_request);
1052 if (dlm_req != NULL) {
1053 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1054 req, dlm_req, &cbs);
1056 CERROR ("Can't unpack dlm_req\n");
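/* Server-side LDLM_CONVERT handler: look the lock up by handle, try the
 * requested mode conversion, drop the lock from the waiting list if the
 * conversion succeeded, and reprocess the resource. */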
1062 int ldlm_handle_convert0(struct ptlrpc_request *req,
1063 const struct ldlm_request *dlm_req)
1065 struct ldlm_reply *dlm_rep;
1066 struct ldlm_lock *lock;
1068 int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1069 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
1072 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1073 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1074 LDLM_CONVERT - LDLM_FIRST_OPC);
1076 rc = lustre_pack_reply(req, 2, size, NULL);
1078 CERROR("out of memory\n");
1081 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
1083 dlm_rep->lock_flags = dlm_req->lock_flags;
1085 lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1087 req->rq_status = EINVAL;
1091 LDLM_DEBUG(lock, "server-side convert handler START");
1093 do_gettimeofday(&lock->l_enqueued_time);
1094 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1095 &dlm_rep->lock_flags);
1097 if (ldlm_del_waiting_lock(lock))
1098 LDLM_DEBUG(lock, "converted waiting lock");
1101 req->rq_status = EDEADLOCK;
1106 if (!req->rq_status)
1107 ldlm_reprocess_all(lock->l_resource);
1108 LDLM_DEBUG(lock, "server-side convert handler END");
1109 LDLM_LOCK_PUT(lock);
1111 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1116 int ldlm_handle_convert(struct ptlrpc_request *req)
1119 struct ldlm_request *dlm_req;
1121 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof *dlm_req,
1122 lustre_swab_ldlm_request);
1123 if (dlm_req != NULL) {
1124 rc = ldlm_handle_convert0(req, dlm_req);
1126 CERROR ("Can't unpack dlm_req\n");
1132 /* Cancel all the locks whose handles are packed into the ldlm_request. */
1133 int ldlm_request_cancel(struct ptlrpc_request *req,
1134 const struct ldlm_request *dlm_req, int first)
1136 struct ldlm_resource *res, *pres = NULL;
1137 struct ldlm_lock *lock;
1138 int i, count, done = 0;
1141 LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1142 "starting at %d", dlm_req->lock_count, first);
1143 count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1147 /* There are no locks on the server at replay time, so
1148 * skip lock cancelling to make the replay tests pass. */
1149 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1152 for (i = first; i < count; i++) {
1153 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1155 LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1156 "lock (cookie "LPU64")",
1157 dlm_req->lock_handle[i].cookie);
1161 res = lock->l_resource;
1166 ldlm_reprocess_all(pres);
1167 ldlm_resource_putref(pres);
1170 ldlm_resource_getref(res);
1171 ldlm_res_lvbo_update(res, NULL, 0, 1);
1175 ldlm_lock_cancel(lock);
1176 LDLM_LOCK_PUT(lock);
1179 ldlm_reprocess_all(pres);
1180 ldlm_resource_putref(pres);
1182 LDLM_DEBUG_NOLOCK("server-side cancel handler END");
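/* Server-side LDLM_CANCEL handler: unpack the request, cancel every lock
 * whose handle it carries via ldlm_request_cancel(), and send the reply. */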
1186 int ldlm_handle_cancel(struct ptlrpc_request *req)
1188 struct ldlm_request *dlm_req;
1192 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1193 lustre_swab_ldlm_request);
1194 if (dlm_req == NULL) {
1195 CERROR("bad request buffer for cancel\n");
1199 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1200 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1201 LDLM_CANCEL - LDLM_FIRST_OPC);
1203 rc = lustre_pack_reply(req, 1, NULL, NULL);
1205 CERROR("out of memory\n");
1209 if (!ldlm_request_cancel(req, dlm_req, 0))
1210 req->rq_status = ESTALE;
1212 if (ptlrpc_reply(req) != 0)
1218 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1219 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1224 LDLM_DEBUG(lock, "client blocking AST callback handler START");
1226 lock_res_and_lock(lock);
1227 lock->l_flags |= LDLM_FL_CBPENDING;
1229 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1230 lock->l_flags |= LDLM_FL_CANCEL;
1232 do_ast = (!lock->l_readers && !lock->l_writers);
1233 unlock_res_and_lock(lock);
1236 LDLM_DEBUG(lock, "already unused, calling "
1237 "callback (%p)", lock->l_blocking_ast);
1238 if (lock->l_blocking_ast != NULL)
1239 lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1242 LDLM_DEBUG(lock, "Lock still has references, will be"
1243 " cancelled later");
1246 LDLM_DEBUG(lock, "client blocking callback handler END");
1247 LDLM_LOCK_PUT(lock);
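/* Client-side completion AST handler: if the enqueue reply has not been
 * processed yet, take the granted mode, policy data and resource name
 * from the server's descriptor, copy any LVB from the request, grant the
 * lock and run the completion AST work list. */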
1251 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1252 struct ldlm_namespace *ns,
1253 struct ldlm_request *dlm_req,
1254 struct ldlm_lock *lock)
1256 CFS_LIST_HEAD(ast_list);
1259 LDLM_DEBUG(lock, "client completion callback handler START");
1261 lock_res_and_lock(lock);
1263 /* If we receive the completion AST before the actual enqueue returned,
1264 * then we might need to switch lock modes, resources, or extents. */
1265 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1266 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1267 LDLM_DEBUG(lock, "completion AST, new lock mode");
1270 if (lock->l_resource->lr_type != LDLM_PLAIN) {
1271 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
1272 LDLM_DEBUG(lock, "completion AST, new policy data");
1275 ldlm_resource_unlink_lock(lock);
1276 if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1277 &lock->l_resource->lr_name,
1278 sizeof(lock->l_resource->lr_name)) != 0) {
1279 unlock_res_and_lock(lock);
1280 ldlm_lock_change_resource(ns, lock,
1281 &dlm_req->lock_desc.l_resource.lr_name);
1282 LDLM_DEBUG(lock, "completion AST, new resource");
1283 CERROR("change resource!\n");
1284 lock_res_and_lock(lock);
1287 if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1288 lock->l_flags |= LDLM_FL_CBPENDING;
1289 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1292 if (lock->l_lvb_len) {
1294 lvb = lustre_swab_reqbuf(req, DLM_REQ_REC_OFF, lock->l_lvb_len,
1295 lock->l_lvb_swabber);
1297 LDLM_ERROR(lock, "completion AST did not contain "
1300 memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1304 ldlm_grant_lock(lock, &ast_list);
1305 unlock_res_and_lock(lock);
1307 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1309 ldlm_run_cp_ast_work(&ast_list);
1311 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1313 LDLM_LOCK_PUT(lock);
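/* Client-side glimpse AST handler: let the lock's l_glimpse_ast fill in
 * the reply, then opportunistically cancel an idle PW lock (unused for
 * more than ten seconds) through the blocking-AST path. */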
1317 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1318 struct ldlm_namespace *ns,
1319 struct ldlm_request *dlm_req,
1320 struct ldlm_lock *lock)
1325 LDLM_DEBUG(lock, "client glimpse AST callback handler");
1327 if (lock->l_glimpse_ast != NULL)
1328 rc = lock->l_glimpse_ast(lock, req);
1330 if (req->rq_repmsg != NULL) {
1333 req->rq_status = rc;
1337 lock_res_and_lock(lock);
1338 if (lock->l_granted_mode == LCK_PW &&
1339 !lock->l_readers && !lock->l_writers &&
1340 cfs_time_after(cfs_time_current(),
1341 cfs_time_add(lock->l_last_used,
1342 cfs_time_seconds(10)))) {
1343 unlock_res_and_lock(lock);
1344 if (ldlm_bl_to_thread_lock(ns, NULL, lock))
1345 ldlm_handle_bl_callback(ns, NULL, lock);
1350 unlock_res_and_lock(lock);
1351 LDLM_LOCK_PUT(lock);
1355 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1357 req->rq_status = rc;
1358 if (req->rq_reply_state == NULL) {
1359 rc = lustre_pack_reply(req, 1, NULL, NULL);
1363 return ptlrpc_reply(req);
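/* Queue a work item for the blocking-AST threads: either one lock with
 * its descriptor or a whole list of already-cancelled locks. Callers fall
 * back to handling the callback in the current context if this fails. */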
1367 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
1368 struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
1369 struct list_head *cancels, int count)
1371 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1372 struct ldlm_bl_work_item *blwi;
1375 if (cancels && count == 0)
1378 OBD_ALLOC(blwi, sizeof(*blwi));
1384 blwi->blwi_ld = *ld;
1386 list_add(&blwi->blwi_head, cancels);
1387 list_del_init(cancels);
1388 blwi->blwi_count = count;
1390 blwi->blwi_lock = lock;
1392 spin_lock(&blp->blp_lock);
1393 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1394 cfs_waitq_signal(&blp->blp_waitq);
1395 spin_unlock(&blp->blp_lock);
1401 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1402 struct ldlm_lock *lock)
1405 RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0));
1411 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1412 struct list_head *cancels, int count)
1415 RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
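/* Request handler for the ldlm_cbd service: dispatches blocking,
 * completion and glimpse callbacks (plus a few llog and quota opcodes)
 * that the server sends back to this node. */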
1421 static int ldlm_callback_handler(struct ptlrpc_request *req)
1423 struct ldlm_namespace *ns;
1424 struct ldlm_request *dlm_req;
1425 struct ldlm_lock *lock;
1429 /* Requests arrive in sender's byte order. The ptlrpc service
1430 * handler has already checked and, if necessary, byte-swapped the
1431 * incoming request message body, but I am responsible for the
1432 * message buffers. */
1434 if (req->rq_export == NULL) {
1435 struct ldlm_request *dlm_req;
1437 CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
1438 "export cookie "LPX64"; this is "
1439 "normal if this node rebooted with a lock held\n",
1440 lustre_msg_get_opc(req->rq_reqmsg),
1441 libcfs_id2str(req->rq_peer),
1442 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1444 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1446 lustre_swab_ldlm_request);
1447 if (dlm_req != NULL)
1448 CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
1449 dlm_req->lock_handle[0].cookie);
1451 ldlm_callback_reply(req, -ENOTCONN);
1455 LASSERT(req->rq_export != NULL);
1456 LASSERT(req->rq_export->exp_obd != NULL);
1458 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1459 case LDLM_BL_CALLBACK:
1460 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1462 case LDLM_CP_CALLBACK:
1463 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
1465 case LDLM_GL_CALLBACK:
1466 OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
1468 case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1469 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1470 rc = llog_origin_handle_cancel(req);
1471 ldlm_callback_reply(req, rc);
1473 case OBD_QC_CALLBACK:
1474 OBD_FAIL_RETURN(OBD_FAIL_OBD_QC_CALLBACK_NET, 0);
1475 rc = target_handle_qc_callback(req);
1476 ldlm_callback_reply(req, rc);
1480 /* reply in handler */
1481 rc = target_handle_dqacq_callback(req);
1483 case LLOG_ORIGIN_HANDLE_CREATE:
1484 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1485 rc = llog_origin_handle_create(req);
1486 ldlm_callback_reply(req, rc);
1488 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1489 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1490 rc = llog_origin_handle_next_block(req);
1491 ldlm_callback_reply(req, rc);
1493 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1494 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1495 rc = llog_origin_handle_read_header(req);
1496 ldlm_callback_reply(req, rc);
1498 case LLOG_ORIGIN_HANDLE_CLOSE:
1499 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1500 rc = llog_origin_handle_close(req);
1501 ldlm_callback_reply(req, rc);
1507 CERROR("unknown opcode %u\n",
1508 lustre_msg_get_opc(req->rq_reqmsg));
1509 ldlm_callback_reply(req, -EPROTO);
1513 ns = req->rq_export->exp_obd->obd_namespace;
1514 LASSERT(ns != NULL);
1516 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1517 lustre_swab_ldlm_request);
1518 if (dlm_req == NULL) {
1519 CERROR ("can't unpack dlm_req\n");
1520 ldlm_callback_reply(req, -EPROTO);
1524 lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle[0]);
1526 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
1527 "disappeared\n", dlm_req->lock_handle[0].cookie);
1528 ldlm_callback_reply(req, -EINVAL);
1532 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
1533 lock_res_and_lock(lock);
1534 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
1535 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
1536 /* If somebody cancels locks and the cache is already dropped,
1537 * we can tell the server we have no lock. Otherwise, we
1538 * should send the cancel after dropping the cache. */
1539 if ((lock->l_flags & LDLM_FL_CANCELING) &&
1540 (lock->l_flags & LDLM_FL_BL_DONE)) {
1541 LDLM_DEBUG(lock, "callback on lock "
1542 LPX64" - lock disappeared\n",
1543 dlm_req->lock_handle[0].cookie);
1544 unlock_res_and_lock(lock);
1545 LDLM_LOCK_PUT(lock);
1546 ldlm_callback_reply(req, -EINVAL);
1549 lock->l_flags |= LDLM_FL_BL_AST;
1551 unlock_res_and_lock(lock);
1553 /* We want the ost thread to get this reply so that it can respond
1554 * to ost requests (write cache writeback) that might be triggered
1557 * But we'd also like to be able to indicate in the reply that we're
1558 * cancelling right now, because it's unused, or have an intent result
1559 * in the reply, so we might have to push the responsibility for sending
1560 * the reply down into the AST handlers, alas. */
1562 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1563 case LDLM_BL_CALLBACK:
1564 CDEBUG(D_INODE, "blocking ast\n");
1565 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK))
1566 ldlm_callback_reply(req, 0);
1567 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
1568 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
1570 case LDLM_CP_CALLBACK:
1571 CDEBUG(D_INODE, "completion ast\n");
1572 ldlm_callback_reply(req, 0);
1573 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
1575 case LDLM_GL_CALLBACK:
1576 CDEBUG(D_INODE, "glimpse ast\n");
1577 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
1580 LBUG(); /* checked above */
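/* Request handler for the ldlm_canceld service: processes LDLM_CANCEL
 * requests (and legacy OBD_LOG_CANCEL) arriving from clients. */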
1586 static int ldlm_cancel_handler(struct ptlrpc_request *req)
1591 /* Requests arrive in sender's byte order. The ptlrpc service
1592 * handler has already checked and, if necessary, byte-swapped the
1593 * incoming request message body, but I am responsible for the
1594 * message buffers. */
1596 if (req->rq_export == NULL) {
1597 struct ldlm_request *dlm_req;
1599 CERROR("operation %d from %s with bad export cookie "LPU64"\n",
1600 lustre_msg_get_opc(req->rq_reqmsg),
1601 libcfs_id2str(req->rq_peer),
1602 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1604 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1606 lustre_swab_ldlm_request);
1607 if (dlm_req != NULL)
1608 ldlm_lock_dump_handle(D_ERROR,
1609 &dlm_req->lock_handle[0]);
1610 ldlm_callback_reply(req, -ENOTCONN);
1614 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1616 /* XXX FIXME move this back to mds/handler.c, bug 249 */
1618 CDEBUG(D_INODE, "cancel\n");
1619 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
1620 rc = ldlm_handle_cancel(req);
1624 case OBD_LOG_CANCEL:
1625 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1626 rc = llog_origin_handle_cancel(req);
1627 ldlm_callback_reply(req, rc);
1630 CERROR("invalid opcode %d\n",
1631 lustre_msg_get_opc(req->rq_reqmsg));
1632 ldlm_callback_reply(req, -EINVAL);
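/* Send a blocking AST for every granted PLAIN or IBITS lock held by an
 * export, marking each lock LDLM_FL_AST_SENT and faking an exclusive
 * (LCK_EX) request in the descriptor. */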
1638 void ldlm_revoke_export_locks(struct obd_export *exp)
1640 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
1641 struct list_head rpc_list;
1642 struct ldlm_lock *lock, *next;
1643 struct ldlm_lock_desc desc;
1646 INIT_LIST_HEAD(&rpc_list);
1648 spin_lock(&exp->exp_ldlm_data.led_lock);
1649 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
1650 lock_res_and_lock(lock);
1652 if (lock->l_req_mode != lock->l_granted_mode) {
1653 unlock_res_and_lock(lock);
1657 LASSERT(lock->l_resource);
1658 if (lock->l_resource->lr_type != LDLM_IBITS &&
1659 lock->l_resource->lr_type != LDLM_PLAIN) {
1660 unlock_res_and_lock(lock);
1664 if (lock->l_flags & LDLM_FL_AST_SENT) {
1665 unlock_res_and_lock(lock);
1669 LASSERT(lock->l_blocking_ast);
1670 LASSERT(!lock->l_blocking_lock);
1672 lock->l_flags |= LDLM_FL_AST_SENT;
1673 list_move(&lock->l_export_chain, &rpc_list);
1675 unlock_res_and_lock(lock);
1677 spin_unlock(&exp->exp_ldlm_data.led_lock);
1679 while (!list_empty(&rpc_list)) {
1680 lock = list_entry(rpc_list.next, struct ldlm_lock,
1682 list_del_init(&lock->l_export_chain);
1684 /* the desc just pretends to be exclusive */
1685 ldlm_lock2desc(lock, &desc);
1686 desc.l_req_mode = LCK_EX;
1687 desc.l_granted_mode = 0;
1689 LDLM_LOCK_GET(lock);
1690 lock->l_blocking_ast(lock, &desc, lock->l_ast_data,
1692 LDLM_LOCK_PUT(lock);
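/* Pop the next work item off the blocking-AST pool's list, or return NULL
 * if there is nothing to do. */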
1698 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
1700 struct ldlm_bl_work_item *blwi = NULL;
1702 spin_lock(&blp->blp_lock);
1703 if (!list_empty(&blp->blp_list)) {
1704 blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
1706 list_del(&blwi->blwi_entry);
1708 spin_unlock(&blp->blp_lock);
1713 struct ldlm_bl_thread_data {
1715 struct ldlm_bl_pool *bltd_blp;
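/* Body of an ldlm_bl_NN thread: wait for work items and either cancel a
 * list of locks on the server (ldlm_cli_cancel_list) or run a single
 * blocking callback; an item with a NULL namespace tells the thread to
 * exit. */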
1718 static int ldlm_bl_thread_main(void *arg)
1720 struct ldlm_bl_thread_data *bltd = arg;
1721 struct ldlm_bl_pool *blp = bltd->bltd_blp;
1725 char name[CFS_CURPROC_COMM_MAX];
1726 snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
1728 cfs_daemonize(name);
1731 atomic_inc(&blp->blp_num_threads);
1732 complete(&blp->blp_comp);
1735 struct l_wait_info lwi = { 0 };
1736 struct ldlm_bl_work_item *blwi = NULL;
1738 l_wait_event_exclusive(blp->blp_waitq,
1739 (blwi = ldlm_bl_get_work(blp)) != NULL,
1742 if (blwi->blwi_ns == NULL)
1745 if (blwi->blwi_count) {
1746 /* In the special case when we cancel locks in the LRU
1747 * asynchronously, we pass the list of locks here.
1748 * These locks are marked LDLM_FL_CANCELING and have
1749 * already been cancelled locally. */
1750 ldlm_cli_cancel_list(&blwi->blwi_head,
1751 blwi->blwi_count, NULL, 0, 0);
1753 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
1756 OBD_FREE(blwi, sizeof(*blwi));
1759 atomic_dec(&blp->blp_num_threads);
1760 complete(&blp->blp_comp);
1766 static int ldlm_setup(void);
1767 static int ldlm_cleanup(int force);
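/* Module-wide reference counting: the first ldlm_get_ref() brings the
 * LDLM up via ldlm_setup(), the last ldlm_put_ref() tears it down via
 * ldlm_cleanup(). */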
1769 int ldlm_get_ref(void)
1773 mutex_down(&ldlm_ref_sem);
1774 if (++ldlm_refcount == 1) {
1779 mutex_up(&ldlm_ref_sem);
1784 void ldlm_put_ref(int force)
1787 mutex_down(&ldlm_ref_sem);
1788 if (ldlm_refcount == 1) {
1789 int rc = ldlm_cleanup(force);
1791 CERROR("ldlm_cleanup failed: %d\n", rc);
1797 mutex_up(&ldlm_ref_sem);
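/* Bring the LDLM up: /proc entries, the ldlm_cbd and ldlm_canceld ptlrpc
 * services, the pool of blocking-AST threads and the expired-lock
 * thread. */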
1802 static int ldlm_setup(void)
1804 struct ldlm_bl_pool *blp;
1811 if (ldlm_state != NULL)
1814 OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
1815 if (ldlm_state == NULL)
1819 rc = ldlm_proc_setup();
1824 ldlm_state->ldlm_cb_service =
1825 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1826 LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
1827 LDLM_CB_REPLY_PORTAL, ldlm_timeout * 900,
1828 ldlm_callback_handler, "ldlm_cbd",
1829 ldlm_svc_proc_dir, NULL,
1830 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1832 LCT_MD_THREAD|LCT_DT_THREAD);
1834 if (!ldlm_state->ldlm_cb_service) {
1835 CERROR("failed to start service\n");
1836 GOTO(out_proc, rc = -ENOMEM);
1839 ldlm_state->ldlm_cancel_service =
1840 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1841 LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
1842 LDLM_CANCEL_REPLY_PORTAL, ldlm_timeout * 6000,
1843 ldlm_cancel_handler, "ldlm_canceld",
1844 ldlm_svc_proc_dir, NULL,
1845 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1847 LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD);
1849 if (!ldlm_state->ldlm_cancel_service) {
1850 CERROR("failed to start service\n");
1851 GOTO(out_proc, rc = -ENOMEM);
1854 OBD_ALLOC(blp, sizeof(*blp));
1856 GOTO(out_proc, rc = -ENOMEM);
1857 ldlm_state->ldlm_bl_pool = blp;
1859 atomic_set(&blp->blp_num_threads, 0);
1860 cfs_waitq_init(&blp->blp_waitq);
1861 spin_lock_init(&blp->blp_lock);
1863 CFS_INIT_LIST_HEAD(&blp->blp_list);
1866 for (i = 0; i < LDLM_BL_THREADS; i++) {
1867 struct ldlm_bl_thread_data bltd = {
1871 init_completion(&blp->blp_comp);
1872 rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
1874 CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
1875 GOTO(out_thread, rc);
1877 wait_for_completion(&blp->blp_comp);
1880 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cancel_service);
1882 GOTO(out_thread, rc);
1884 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cb_service);
1886 GOTO(out_thread, rc);
1888 CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
1889 expired_lock_thread.elt_state = ELT_STOPPED;
1890 cfs_waitq_init(&expired_lock_thread.elt_waitq);
1892 CFS_INIT_LIST_HEAD(&waiting_locks_list);
1893 spin_lock_init(&waiting_locks_spinlock);
1894 cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
1896 rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
1898 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
1899 GOTO(out_thread, rc);
1902 wait_event(expired_lock_thread.elt_waitq,
1903 expired_lock_thread.elt_state == ELT_READY);
1910 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1911 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1916 ldlm_proc_cleanup();
1919 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
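/* Undo ldlm_setup(): stop the blocking-AST and expired-lock threads,
 * unregister both ptlrpc services and free the state. Complains if any
 * namespace still exists. */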
1924 static int ldlm_cleanup(int force)
1927 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1931 if (!list_empty(&ldlm_namespace_list)) {
1932 CERROR("ldlm still has namespaces; clean these up first.\n");
1933 ldlm_dump_all_namespaces(D_DLMTRACE);
1938 while (atomic_read(&blp->blp_num_threads) > 0) {
1939 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
1941 init_completion(&blp->blp_comp);
1943 spin_lock(&blp->blp_lock);
1944 list_add_tail(&blwi.blwi_entry, &blp->blp_list);
1945 cfs_waitq_signal(&blp->blp_waitq);
1946 spin_unlock(&blp->blp_lock);
1948 wait_for_completion(&blp->blp_comp);
1950 OBD_FREE(blp, sizeof(*blp));
1952 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1953 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1954 ldlm_proc_cleanup();
1956 expired_lock_thread.elt_state = ELT_TERMINATE;
1957 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
1958 wait_event(expired_lock_thread.elt_waitq,
1959 expired_lock_thread.elt_state == ELT_STOPPED);
1961 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1962 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1965 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
1971 int __init ldlm_init(void)
1973 init_mutex(&ldlm_ref_sem);
1974 init_mutex(&ldlm_namespace_lock);
1975 ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
1976 sizeof(struct ldlm_resource), 0,
1977 SLAB_HWCACHE_ALIGN);
1978 if (ldlm_resource_slab == NULL)
1981 ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
1982 sizeof(struct ldlm_lock), 0,
1983 SLAB_HWCACHE_ALIGN);
1984 if (ldlm_lock_slab == NULL) {
1985 cfs_mem_cache_destroy(ldlm_resource_slab);
1992 void __exit ldlm_exit(void)
1996 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
1997 rc = cfs_mem_cache_destroy(ldlm_resource_slab);
1998 LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
1999 rc = cfs_mem_cache_destroy(ldlm_lock_slab);
2000 LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
2004 EXPORT_SYMBOL(ldlm_extent_shift_kms);
2007 EXPORT_SYMBOL(ldlm_get_processing_policy);
2008 EXPORT_SYMBOL(ldlm_lock2desc);
2009 EXPORT_SYMBOL(ldlm_register_intent);
2010 EXPORT_SYMBOL(ldlm_lockname);
2011 EXPORT_SYMBOL(ldlm_typename);
2012 EXPORT_SYMBOL(ldlm_lock2handle);
2013 EXPORT_SYMBOL(__ldlm_handle2lock);
2014 EXPORT_SYMBOL(ldlm_lock_get);
2015 EXPORT_SYMBOL(ldlm_lock_put);
2016 EXPORT_SYMBOL(ldlm_lock_match);
2017 EXPORT_SYMBOL(ldlm_lock_cancel);
2018 EXPORT_SYMBOL(ldlm_lock_addref);
2019 EXPORT_SYMBOL(ldlm_lock_decref);
2020 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
2021 EXPORT_SYMBOL(ldlm_lock_change_resource);
2022 EXPORT_SYMBOL(ldlm_lock_set_data);
2023 EXPORT_SYMBOL(ldlm_it2str);
2024 EXPORT_SYMBOL(ldlm_lock_dump);
2025 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2026 EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
2027 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
2028 EXPORT_SYMBOL(ldlm_lock_allow_match);
2030 /* ldlm_request.c */
2031 EXPORT_SYMBOL(ldlm_completion_ast);
2032 EXPORT_SYMBOL(ldlm_blocking_ast);
2033 EXPORT_SYMBOL(ldlm_glimpse_ast);
2034 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2035 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2036 EXPORT_SYMBOL(ldlm_cli_convert);
2037 EXPORT_SYMBOL(ldlm_cli_enqueue);
2038 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2039 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2040 EXPORT_SYMBOL(ldlm_cli_cancel);
2041 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2042 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2043 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2044 EXPORT_SYMBOL(ldlm_cli_join_lru);
2045 EXPORT_SYMBOL(ldlm_replay_locks);
2046 EXPORT_SYMBOL(ldlm_resource_foreach);
2047 EXPORT_SYMBOL(ldlm_namespace_foreach);
2048 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
2049 EXPORT_SYMBOL(ldlm_resource_iterate);
2050 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2051 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2054 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2055 EXPORT_SYMBOL(ldlm_server_completion_ast);
2056 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2057 EXPORT_SYMBOL(ldlm_handle_enqueue);
2058 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2059 EXPORT_SYMBOL(ldlm_handle_cancel);
2060 EXPORT_SYMBOL(ldlm_request_cancel);
2061 EXPORT_SYMBOL(ldlm_handle_convert);
2062 EXPORT_SYMBOL(ldlm_handle_convert0);
2063 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2064 EXPORT_SYMBOL(ldlm_get_ref);
2065 EXPORT_SYMBOL(ldlm_put_ref);
2066 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2067 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2069 /* ldlm_resource.c */
2070 EXPORT_SYMBOL(ldlm_namespace_new);
2071 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2072 EXPORT_SYMBOL(ldlm_namespace_free);
2073 EXPORT_SYMBOL(ldlm_namespace_dump);
2074 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2075 EXPORT_SYMBOL(ldlm_resource_get);
2076 EXPORT_SYMBOL(ldlm_resource_putref);
2077 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
2080 EXPORT_SYMBOL(client_import_add_conn);
2081 EXPORT_SYMBOL(client_import_del_conn);
2082 EXPORT_SYMBOL(client_obd_setup);
2083 EXPORT_SYMBOL(client_obd_cleanup);
2084 EXPORT_SYMBOL(client_connect_import);
2085 EXPORT_SYMBOL(client_disconnect_export);
2086 EXPORT_SYMBOL(target_start_recovery_thread);
2087 EXPORT_SYMBOL(target_stop_recovery_thread);
2088 EXPORT_SYMBOL(target_handle_connect);
2089 EXPORT_SYMBOL(target_cleanup_recovery);
2090 EXPORT_SYMBOL(target_destroy_export);
2091 EXPORT_SYMBOL(target_cancel_recovery_timer);
2092 EXPORT_SYMBOL(target_send_reply);
2093 EXPORT_SYMBOL(target_queue_recovery_request);
2094 EXPORT_SYMBOL(target_handle_ping);
2095 EXPORT_SYMBOL(target_handle_disconnect);
2098 EXPORT_SYMBOL(lock_res_and_lock);
2099 EXPORT_SYMBOL(unlock_res_and_lock);