1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002-2004 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of the Lustre file system, http://www.lustre.org
9 * Lustre is a trademark of Cluster File Systems, Inc.
11 * You may have signed or agreed to another license before downloading
12 * this software. If so, you are bound by the terms and conditions
13 * of that agreement, and the following does not apply to you. See the
14 * LICENSE file included with this distribution for more information.
16 * If you did not agree to a different license, then this copy of Lustre
17 * is open source software; you can redistribute it and/or modify it
18 * under the terms of version 2 of the GNU General Public License as
19 * published by the Free Software Foundation.
21 * In either case, Lustre is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * license text for more details.
28 # define EXPORT_SYMTAB
30 #define DEBUG_SUBSYSTEM S_LDLM
33 # include <libcfs/libcfs.h>
35 # include <liblustre.h>
38 #include <lustre_dlm.h>
39 #include <obd_class.h>
40 #include <libcfs/list.h>
41 #include "ldlm_internal.h"
43 extern cfs_mem_cache_t *ldlm_resource_slab;
44 extern cfs_mem_cache_t *ldlm_lock_slab;
45 extern struct list_head ldlm_namespace_list;
47 extern struct semaphore ldlm_namespace_lock;
48 static struct semaphore ldlm_ref_sem;
49 static int ldlm_refcount;
53 static struct ldlm_state *ldlm_state;
55 inline cfs_time_t round_timeout(cfs_time_t timeout)
57 return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
60 /* timeout for initial callback (AST) reply */
61 static inline unsigned int ldlm_get_rq_timeout(unsigned int ldlm_timeout,
62 unsigned int obd_timeout)
64 unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
66 return timeout < 1 ? 1 : timeout;
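/*
 * Editorial worked example (illustrative values, not taken from any specific
 * configuration): with obd_timeout = 100 and ldlm_timeout = 20, the AST reply
 * timeout is min(20, 100 / 3) = 20 seconds; with obd_timeout = 6 and
 * ldlm_timeout = 0 the result would be clamped up to the 1-second minimum.
 */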
70 /* waiting_locks_spinlock protects both waiting_locks_list and expired_lock_thread */
71 static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
72 static struct list_head waiting_locks_list;
73 static cfs_timer_t waiting_locks_timer;
75 static struct expired_lock_thread {
76 cfs_waitq_t elt_waitq;
79 struct list_head elt_expired_locks;
80 } expired_lock_thread;
85 #define ELT_TERMINATE 2
89 struct list_head blp_list;
90 cfs_waitq_t blp_waitq;
91 atomic_t blp_num_threads;
92 struct completion blp_comp;
95 struct ldlm_bl_work_item {
96 struct list_head blwi_entry;
97 struct ldlm_namespace *blwi_ns;
98 struct ldlm_lock_desc blwi_ld;
99 struct ldlm_lock *blwi_lock;
105 static inline int have_expired_locks(void)
110 spin_lock_bh(&waiting_locks_spinlock);
111 need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
112 spin_unlock_bh(&waiting_locks_spinlock);
117 static int expired_lock_main(void *arg)
119 struct list_head *expired = &expired_lock_thread.elt_expired_locks;
120 struct l_wait_info lwi = { 0 };
124 cfs_daemonize("ldlm_elt");
126 expired_lock_thread.elt_state = ELT_READY;
127 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
130 l_wait_event(expired_lock_thread.elt_waitq,
131 have_expired_locks() ||
132 expired_lock_thread.elt_state == ELT_TERMINATE,
135 spin_lock_bh(&waiting_locks_spinlock);
136 if (expired_lock_thread.elt_dump) {
137 spin_unlock_bh(&waiting_locks_spinlock);
139 /* from waiting_locks_callback, but not in timer */
140 libcfs_debug_dumplog();
141 libcfs_run_lbug_upcall(__FILE__,
142 "waiting_locks_callback",
143 expired_lock_thread.elt_dump);
145 spin_lock_bh(&waiting_locks_spinlock);
146 expired_lock_thread.elt_dump = 0;
151 while (!list_empty(expired)) {
152 struct obd_export *export;
153 struct ldlm_lock *lock;
155 lock = list_entry(expired->next, struct ldlm_lock,
157 if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
158 (void *)lock >= LP_POISON) {
159 spin_unlock_bh(&waiting_locks_spinlock);
160 CERROR("free lock on elt list %p\n", lock);
163 list_del_init(&lock->l_pending_chain);
164 if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
165 (void *)lock->l_export >= LP_POISON) {
166 CERROR("lock with free export on elt list %p\n",
168 lock->l_export = NULL;
169 LDLM_ERROR(lock, "free export");
172 export = class_export_get(lock->l_export);
173 spin_unlock_bh(&waiting_locks_spinlock);
176 class_fail_export(export);
177 class_export_put(export);
178 spin_lock_bh(&waiting_locks_spinlock);
180 spin_unlock_bh(&waiting_locks_spinlock);
182 if (do_dump && obd_dump_on_eviction) {
183 CERROR("dump the log upon eviction\n");
184 libcfs_debug_dumplog();
187 if (expired_lock_thread.elt_state == ELT_TERMINATE)
191 expired_lock_thread.elt_state = ELT_STOPPED;
192 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
196 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
198 /* This is called from within a timer interrupt and cannot schedule */
199 static void waiting_locks_callback(unsigned long unused)
201 struct ldlm_lock *lock, *last = NULL;
204 spin_lock_bh(&waiting_locks_spinlock);
205 while (!list_empty(&waiting_locks_list)) {
206 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
209 if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
210 (lock->l_req_mode == LCK_GROUP))
213 if (ptlrpc_check_suspend()) {
214 /* there is a case where we talk to one MDS while holding a
215 * lock from another MDS; that way we can easily get here if
216 * the second MDS is being recovered, so we suspend the
217 * timeouts. bug 6019 */
219 LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
220 lock->l_export->exp_client_uuid.uuid,
221 lock->l_export->exp_connection->c_remote_uuid.uuid,
222 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
224 list_del_init(&lock->l_pending_chain);
225 spin_unlock_bh(&waiting_locks_spinlock);
226 ldlm_add_waiting_lock(lock);
230 /* if the timeout overlaps the activation time of suspended timeouts,
231 * extend it to give the client a chance to reconnect */
232 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
233 cfs_time_seconds(obd_timeout)/2),
234 ptlrpc_suspend_wakeup_time())) {
235 LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
236 lock->l_export->exp_client_uuid.uuid,
237 lock->l_export->exp_connection->c_remote_uuid.uuid,
238 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
240 list_del_init(&lock->l_pending_chain);
241 spin_unlock_bh(&waiting_locks_spinlock);
242 ldlm_add_waiting_lock(lock);
246 LDLM_ERROR(lock, "lock callback timer expired: evicting client "
248 lock->l_export->exp_client_uuid.uuid,
249 lock->l_export->exp_connection->c_remote_uuid.uuid,
250 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
254 list_del(&lock->l_pending_chain);
255 list_add(&lock->l_pending_chain,
256 &expired_lock_thread.elt_expired_locks);
259 if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
260 if (obd_dump_on_timeout)
261 expired_lock_thread.elt_dump = __LINE__;
263 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
267 * Make sure the timer will fire again if we have any locks
270 if (!list_empty(&waiting_locks_list)) {
271 cfs_time_t timeout_rounded;
272 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
274 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
275 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
277 spin_unlock_bh(&waiting_locks_spinlock);
281 * Indicate that we're waiting for a client to call us back cancelling a given
282 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
283 * timer to fire appropriately. (We round up to the next second, to avoid
284 * floods of timer firings during periods of high lock contention and traffic).
286 * Called with the namespace lock held.
288 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock)
290 cfs_time_t timeout_rounded;
292 if (!list_empty(&lock->l_pending_chain))
295 lock->l_callback_timeout = cfs_time_add(cfs_time_current(),
296 cfs_time_seconds(obd_timeout)/2);
298 timeout_rounded = round_timeout(lock->l_callback_timeout);
300 if (cfs_time_before(timeout_rounded, cfs_timer_deadline(&waiting_locks_timer)) ||
301 !cfs_timer_is_armed(&waiting_locks_timer)) {
302 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
305 list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
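/*
 * Editorial worked example (illustrative, not part of the original source):
 * with obd_timeout = 100, a lock added at t = 1000.3s gets
 * l_callback_timeout = t + 50s = 1050.3s, which round_timeout() rounds up to
 * 1051s.  The timer is only re-armed when this rounded deadline is earlier
 * than the one already armed (or when no timer is armed at all); the lock
 * itself always goes to the tail of waiting_locks_list, so the list stays
 * sorted by expiry time.
 */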
309 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
313 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
315 spin_lock_bh(&waiting_locks_spinlock);
316 if (lock->l_destroyed) {
317 static cfs_time_t next;
318 spin_unlock_bh(&waiting_locks_spinlock);
319 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
320 if (cfs_time_after(cfs_time_current(), next)) {
321 next = cfs_time_shift(14400);
322 libcfs_debug_dumpstack(NULL);
327 ret = __ldlm_add_waiting_lock(lock);
328 spin_unlock_bh(&waiting_locks_spinlock);
330 LDLM_DEBUG(lock, "%sadding to wait list",
331 ret == 0 ? "not re-" : "");
336 * Remove a lock from the pending list, likely because it had its cancellation
337 * callback arrive without incident. This adjusts the lock-timeout timer if
338 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
340 * Called with namespace lock held.
342 int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
344 struct list_head *list_next;
346 if (list_empty(&lock->l_pending_chain))
349 list_next = lock->l_pending_chain.next;
350 if (lock->l_pending_chain.prev == &waiting_locks_list) {
351 /* Removing the head of the list, adjust timer. */
352 if (list_next == &waiting_locks_list) {
353 /* No more, just cancel. */
354 cfs_timer_disarm(&waiting_locks_timer);
356 struct ldlm_lock *next;
357 next = list_entry(list_next, struct ldlm_lock,
359 cfs_timer_arm(&waiting_locks_timer,
360 round_timeout(next->l_callback_timeout));
363 list_del_init(&lock->l_pending_chain);
368 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
372 if (lock->l_export == NULL) {
373 /* We don't have a "waiting locks list" on clients. */
374 LDLM_DEBUG(lock, "client lock: no-op");
378 spin_lock_bh(&waiting_locks_spinlock);
379 ret = __ldlm_del_waiting_lock(lock);
380 spin_unlock_bh(&waiting_locks_spinlock);
382 LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
389 * Called with namespace lock held.
391 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
393 if (lock->l_export == NULL) {
394 /* We don't have a "waiting locks list" on clients. */
395 LDLM_DEBUG(lock, "client lock: no-op");
399 spin_lock_bh(&waiting_locks_spinlock);
401 if (list_empty(&lock->l_pending_chain)) {
402 spin_unlock_bh(&waiting_locks_spinlock);
403 LDLM_DEBUG(lock, "wasn't waiting");
407 __ldlm_del_waiting_lock(lock);
408 __ldlm_add_waiting_lock(lock);
409 spin_unlock_bh(&waiting_locks_spinlock);
411 LDLM_DEBUG(lock, "refreshed");
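/*
 * Editorial note (not in the original source): a refresh is simply
 * __ldlm_del_waiting_lock() followed by __ldlm_add_waiting_lock() under a
 * single spinlock hold, so the lock gets a fresh obd_timeout/2 deadline and
 * moves to the tail of waiting_locks_list instead of being left to expire.
 */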
415 #else /* !__KERNEL__ */
417 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
419 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
423 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
428 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
432 #endif /* __KERNEL__ */
434 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
435 const char *ast_type)
437 struct ptlrpc_connection *conn = lock->l_export->exp_connection;
438 char *str = libcfs_nid2str(conn->c_peer.nid);
440 LCONSOLE_ERROR_MSG(0x138, "A client on nid %s was evicted from "
441 "service %s.\n", str,
442 lock->l_export->exp_obd->obd_name);
444 LCONSOLE_ERROR_MSG(0x012, "Lock %s callback to %s timed out: "
445                    "rc %d\n", ast_type,
446                    obd_export_nid2str(lock->l_export), rc);
448 if (obd_dump_on_timeout)
449 libcfs_debug_dumplog();
450 class_fail_export(lock->l_export);
453 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
454 struct ptlrpc_request *req, int rc,
455 const char *ast_type)
457 lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
459 if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
460 LASSERT(lock->l_export);
461 if (lock->l_export->exp_libclient) {
462 LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
463 " timeout, just cancelling lock", ast_type,
464 libcfs_nid2str(peer.nid));
465 ldlm_lock_cancel(lock);
467 } else if (lock->l_flags & LDLM_FL_CANCEL) {
468 LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
469 "cancel was received (AST reply lost?)",
470 ast_type, libcfs_nid2str(peer.nid));
471 ldlm_lock_cancel(lock);
474 ldlm_del_waiting_lock(lock);
475 ldlm_failed_ast(lock, rc, ast_type);
479 LDLM_DEBUG(lock, "client (nid %s) returned %d"
480 " from %s AST - normal race",
481 libcfs_nid2str(peer.nid),
483 lustre_msg_get_status(req->rq_repmsg) : -1,
486 LDLM_ERROR(lock, "client (nid %s) returned %d "
487 "from %s AST", libcfs_nid2str(peer.nid),
488 (req->rq_repmsg != NULL) ?
489 lustre_msg_get_status(req->rq_repmsg) : 0,
491 ldlm_lock_cancel(lock);
492 /* Server-side AST functions are called from ldlm_reprocess_all,
493 * which needs to be told to please restart its reprocessing. */
501 * ->l_blocking_ast() method for server-side locks. This is invoked when a newly
502 * enqueued server lock conflicts with the given one.
504 * Sends a blocking AST RPC to the client owning that lock; it also arms the
505 * timeout timer to wait for the client's response.
507 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
508 struct ldlm_lock_desc *desc,
509 void *data, int flag)
511 struct ldlm_request *body;
512 struct ptlrpc_request *req;
513 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
514 [DLM_LOCKREQ_OFF] = sizeof(*body) };
515 int instant_cancel = 0, rc = 0;
518 if (flag == LDLM_CB_CANCELING) {
519 /* Don't need to do anything here. */
524 if (lock->l_export->exp_obd->obd_recovering != 0) {
525 LDLM_ERROR(lock, "BUG 6063: lock collision during recovery");
526 ldlm_lock_dump(D_ERROR, lock, 0);
529 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
530 LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK, 2, size,
535 lock_res(lock->l_resource);
536 if (lock->l_granted_mode != lock->l_req_mode) {
537 /* this blocking AST will be communicated as part of the
538 * completion AST instead */
539 unlock_res(lock->l_resource);
540 ptlrpc_req_finished(req);
541 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
545 if (lock->l_destroyed) {
546 /* The lock has already been destroyed; there is no point in sending an AST. */
547 unlock_res(lock->l_resource);
548 ptlrpc_req_finished(req);
552 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
555 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
556 body->lock_handle[0] = lock->l_remote_handle;
557 body->lock_desc = *desc;
558 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
560 LDLM_DEBUG(lock, "server preparing blocking AST");
562 ptlrpc_req_set_repsize(req, 1, NULL);
563 if (instant_cancel) {
564 unlock_res(lock->l_resource);
565 ldlm_lock_cancel(lock);
567 LASSERT(lock->l_granted_mode == lock->l_req_mode);
568 ldlm_add_waiting_lock(lock);
569 unlock_res(lock->l_resource);
572 req->rq_send_state = LUSTRE_IMP_FULL;
573 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
575 if (lock->l_export && lock->l_export->exp_ldlm_stats)
576 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
577 LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
579 if (unlikely(instant_cancel)) {
580 rc = ptl_send_rpc(req, 1);
582 rc = ptlrpc_queue_wait(req);
583 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
587 rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
589 ptlrpc_req_finished(req);
591 /* If we cancelled the lock, we need to restart ldlm_reprocess_queue */
592 if (!rc && instant_cancel)
598 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
600 struct ldlm_request *body;
601 struct ptlrpc_request *req;
602 struct timeval granted_time;
603 long total_enqueue_wait;
604 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
605 [DLM_LOCKREQ_OFF] = sizeof(*body) };
606 int rc = 0, buffers = 2, instant_cancel = 0;
609 LASSERT(lock != NULL);
611 do_gettimeofday(&granted_time);
612 total_enqueue_wait = cfs_timeval_sub(&granted_time,
613 &lock->l_enqueued_time, NULL);
615 if (total_enqueue_wait / 1000000 > obd_timeout)
616 LDLM_ERROR(lock, "enqueue wait took %luus from %lu",
617 total_enqueue_wait, lock->l_enqueued_time.tv_sec);
619 lock_res_and_lock(lock);
620 if (lock->l_resource->lr_lvb_len) {
621 size[DLM_REQ_REC_OFF] = lock->l_resource->lr_lvb_len;
624 unlock_res_and_lock(lock);
626 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
627 LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK, buffers,
632 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
633 body->lock_handle[0] = lock->l_remote_handle;
634 body->lock_flags = flags;
635 ldlm_lock2desc(lock, &body->lock_desc);
640 lvb = lustre_msg_buf(req->rq_reqmsg, DLM_REQ_REC_OFF,
641 lock->l_resource->lr_lvb_len);
642 lock_res_and_lock(lock);
643 memcpy(lvb, lock->l_resource->lr_lvb_data,
644 lock->l_resource->lr_lvb_len);
645 unlock_res_and_lock(lock);
648 LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
651 ptlrpc_req_set_repsize(req, 1, NULL);
653 req->rq_send_state = LUSTRE_IMP_FULL;
654 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
656 /* We only send real blocking ASTs after the lock is granted */
657 lock_res_and_lock(lock);
658 if (lock->l_flags & LDLM_FL_AST_SENT) {
659 body->lock_flags |= LDLM_FL_AST_SENT;
661 /* We might get here before ldlm_handle_enqueue sets the
662 * LDLM_FL_CANCEL_ON_BLOCK flag. In that case we put this lock
663 * on the waiting list, which is safe: the similar code in
664 * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
665 * which not only cancels the lock but also removes it
666 * from the waiting list */
667 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
668 unlock_res_and_lock(lock);
669 ldlm_lock_cancel(lock);
671 lock_res_and_lock(lock);
673 /* start the lock-timeout clock */
674 ldlm_add_waiting_lock(lock);
677 unlock_res_and_lock(lock);
679 if (lock->l_export && lock->l_export->exp_ldlm_stats)
680 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
681 LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
683 rc = ptlrpc_queue_wait(req);
685 rc = ldlm_handle_ast_error(lock, req, rc, "completion");
687 ptlrpc_req_finished(req);
689 /* If we cancelled the lock, we need to restart ldlm_reprocess_queue */
690 if (!rc && instant_cancel)
696 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
698 struct ldlm_resource *res = lock->l_resource;
699 struct ldlm_request *body;
700 struct ptlrpc_request *req;
701 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
702 [DLM_LOCKREQ_OFF] = sizeof(*body) };
706 LASSERT(lock != NULL);
708 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
709 LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK, 2, size,
714 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
715 body->lock_handle[0] = lock->l_remote_handle;
716 ldlm_lock2desc(lock, &body->lock_desc);
718 lock_res_and_lock(lock);
719 size[REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
720 unlock_res_and_lock(lock);
721 res = lock->l_resource;
722 ptlrpc_req_set_repsize(req, 2, size);
724 req->rq_send_state = LUSTRE_IMP_FULL;
725 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
727 if (lock->l_export && lock->l_export->exp_ldlm_stats)
728 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
729 LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
731 rc = ptlrpc_queue_wait(req);
732 if (rc == -ELDLM_NO_LOCK_DATA)
733 LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
735 rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
737 rc = res->lr_namespace->ns_lvbo->lvbo_update
738 (res, req->rq_repmsg, REPLY_REC_OFF, 1);
739 ptlrpc_req_finished(req);
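/*
 * Editorial summary of the glimpse flow above (hedged): the server asks the
 * client holding the lock for its current LVB (e.g. file size), expecting
 * lr_lvb_len bytes at REPLY_REC_OFF in the reply; on success that buffer is
 * handed to ->lvbo_update() to refresh the resource's LVB, while
 * -ELDLM_NO_LOCK_DATA merely means the client had nothing left to report.
 */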
743 static struct ldlm_lock *
744 find_existing_lock(struct obd_export *exp,
745 const struct lustre_handle *remote_hdl)
747 struct list_head *iter;
749 spin_lock(&exp->exp_ldlm_data.led_lock);
750 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
751 struct ldlm_lock *lock;
752 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
753 if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
755 spin_unlock(&exp->exp_ldlm_data.led_lock);
759 spin_unlock(&exp->exp_ldlm_data.led_lock);
764 extern unsigned long long lu_time_stamp_get(void);
766 #define lu_time_stamp_get() time(NULL)
770 * Main server-side entry point into LDLM. This is called by ptlrpc service
771 * threads to carry out client lock enqueueing requests.
773 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
774 struct ptlrpc_request *req,
775 const struct ldlm_request *dlm_req,
776 const struct ldlm_callback_suite *cbs)
778 struct ldlm_reply *dlm_rep;
779 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
780 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
783 ldlm_error_t err = ELDLM_OK;
784 struct ldlm_lock *lock = NULL;
788 LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
790 ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
791 flags = dlm_req->lock_flags;
793 LASSERT(req->rq_export);
795 if (req->rq_export->exp_ldlm_stats)
796 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
797 LDLM_ENQUEUE - LDLM_FIRST_OPC);
799 if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
800 dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
801 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
802 dlm_req->lock_desc.l_resource.lr_type);
803 GOTO(out, rc = -EFAULT);
806 if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
807 dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
808 dlm_req->lock_desc.l_req_mode &
809 (dlm_req->lock_desc.l_req_mode-1))) {
810 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
811 dlm_req->lock_desc.l_req_mode);
812 GOTO(out, rc = -EFAULT);
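/*
 * Editorial aside: the (l_req_mode & (l_req_mode - 1)) test above also rejects
 * any mode that is not a single power-of-two bit, e.g. a bogus LCK_PR|LCK_PW
 * combination would fail here even though it falls inside the valid range.
 */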
815 if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
816 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
818 DEBUG_REQ(D_ERROR, req,
819 "PLAIN lock request from IBITS client?");
820 GOTO(out, rc = -EPROTO);
822 } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
824 DEBUG_REQ(D_ERROR, req,
825 "IBITS lock request from unaware client?");
826 GOTO(out, rc = -EPROTO);
830 /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
831 against server's _CONNECT_SUPPORTED flags? (I don't want to use
832 ibits for mgc/mgs) */
834 /* INODEBITS_INTEROP: Perform conversion from plain lock to
835 * inodebits lock if client does not support them. */
836 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
837 (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
838 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
839 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
840 MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
841 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
842 dlm_req->lock_desc.l_req_mode = LCK_CR;
846 if (unlikely(flags & LDLM_FL_REPLAY)) {
847 lock = find_existing_lock(req->rq_export,
848 &dlm_req->lock_handle[0]);
850 DEBUG_REQ(D_HA, req, "found existing lock cookie "LPX64,
851 lock->l_handle.h_cookie);
852 GOTO(existing_lock, rc = 0);
856 /* The lock's callback data might be set in the policy function */
857 lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
858 dlm_req->lock_desc.l_resource.lr_type,
859 dlm_req->lock_desc.l_req_mode,
860 cbs->lcs_blocking, cbs->lcs_completion,
861 cbs->lcs_glimpse, NULL, 0);
864 GOTO(out, rc = -ENOMEM);
866 do_gettimeofday(&lock->l_enqueued_time);
867 lock->l_remote_handle = dlm_req->lock_handle[0];
868 LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
870 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
871 /* Don't enqueue a lock onto the export if it has already
872 * been evicted. Cancel it now instead. (bug 3822) */
873 if (req->rq_export->exp_failed) {
874 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
875 GOTO(out, rc = -ENOTCONN);
877 lock->l_export = class_export_get(req->rq_export);
878 spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
879 list_add(&lock->l_export_chain,
880 &lock->l_export->exp_ldlm_data.led_held_locks);
881 spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
885 if (flags & LDLM_FL_HAS_INTENT) {
886 /* In this case, the reply buffer is allocated deep in
887 * local_lock_enqueue by the policy function. */
892 lock_res_and_lock(lock);
893 if (lock->l_resource->lr_lvb_len) {
894 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
897 unlock_res_and_lock(lock);
899 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
900 GOTO(out, rc = -ENOMEM);
902 rc = lustre_pack_reply(req, buffers, size, NULL);
907 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
908 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
909 if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
910 lock->l_req_extent = lock->l_policy_data.l_extent;
912 err = ldlm_lock_enqueue(ns, &lock, cookie, &flags);
916 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
918 dlm_rep->lock_flags = flags;
920 ldlm_lock2desc(lock, &dlm_rep->lock_desc);
921 ldlm_lock2handle(lock, &dlm_rep->lock_handle);
923 /* We never send a blocking AST until the lock is granted, but
924 * we can tell the client about it right now */
925 lock_res_and_lock(lock);
927 /* Now take into account flags to be inherited from original lock
928 request both in reply to client and in our own lock flags. */
929 dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
930 lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
932 /* Don't move a pending lock onto the export if it has already
933 * been evicted. Cancel it now instead. (bug 5683) */
934 if (unlikely(req->rq_export->exp_failed ||
935 OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
936 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
938 } else if (lock->l_flags & LDLM_FL_AST_SENT) {
939 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
940 if (lock->l_granted_mode == lock->l_req_mode) {
942 * Only cancel the lock here if it was granted: otherwise it would
943 * be destroyed immediately and could never be granted in the
944 * future, causing timeouts on the client. A lock that is not
945 * granted will instead be cancelled immediately after the
946 * completion AST is sent.
948 if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
949 unlock_res_and_lock(lock);
950 ldlm_lock_cancel(lock);
951 lock_res_and_lock(lock);
953 ldlm_add_waiting_lock(lock);
956 /* Make sure we never ever grant usual metadata locks to liblustre
958 if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
959 dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
960 req->rq_export->exp_libclient) {
961 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
962 !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
963 CERROR("Granting sync lock to libclient. "
964 "req fl %d, rep fl %d, lock fl %d\n",
965 dlm_req->lock_flags, dlm_rep->lock_flags,
967 LDLM_ERROR(lock, "sync lock");
968 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
969 struct ldlm_intent *it;
970 it = lustre_msg_buf(req->rq_reqmsg,
974 CERROR("This is intent %s ("LPU64")\n",
975 ldlm_it2str(it->opc), it->opc);
981 unlock_res_and_lock(lock);
985 req->rq_status = err;
986 if (req->rq_reply_state == NULL) {
987 err = lustre_pack_reply(req, 1, NULL, NULL);
993 /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
994 * ldlm_reprocess_all. If this moves, revisit that code. -phil */
996 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
997            "(err=%d, rc=%d)", err, rc);
1000 lock_res_and_lock(lock);
1001 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
1002 if (size[DLM_REPLY_REC_OFF] > 0) {
1003 void *lvb = lustre_msg_buf(req->rq_repmsg,
1005 size[DLM_REPLY_REC_OFF]);
1006 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1009 memcpy(lvb, lock->l_resource->lr_lvb_data,
1010 size[DLM_REPLY_REC_OFF]);
1012 unlock_res_and_lock(lock);
1014 lock_res_and_lock(lock);
1015 ldlm_resource_unlink_lock(lock);
1016 ldlm_lock_destroy_nolock(lock);
1017 unlock_res_and_lock(lock);
1020 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1021 ldlm_reprocess_all(lock->l_resource);
1023 LDLM_LOCK_PUT(lock);
1026 LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1032 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1033 ldlm_completion_callback completion_callback,
1034 ldlm_blocking_callback blocking_callback,
1035 ldlm_glimpse_callback glimpse_callback)
1038 struct ldlm_request *dlm_req;
1039 struct ldlm_callback_suite cbs = {
1040 .lcs_completion = completion_callback,
1041 .lcs_blocking = blocking_callback,
1042 .lcs_glimpse = glimpse_callback
1046 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1047 sizeof *dlm_req, lustre_swab_ldlm_request);
1048 if (dlm_req != NULL) {
1049 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1050 req, dlm_req, &cbs);
1052 CERROR ("Can't unpack dlm_req\n");
1058 int ldlm_handle_convert0(struct ptlrpc_request *req,
1059 const struct ldlm_request *dlm_req)
1061 struct ldlm_reply *dlm_rep;
1062 struct ldlm_lock *lock;
1064 int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1065 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
1068 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1069 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1070 LDLM_CONVERT - LDLM_FIRST_OPC);
1072 rc = lustre_pack_reply(req, 2, size, NULL);
1074 CERROR("out of memory\n");
1077 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
1079 dlm_rep->lock_flags = dlm_req->lock_flags;
1081 lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1083 req->rq_status = EINVAL;
1087 LDLM_DEBUG(lock, "server-side convert handler START");
1089 do_gettimeofday(&lock->l_enqueued_time);
1090 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1091 &dlm_rep->lock_flags);
1093 if (ldlm_del_waiting_lock(lock))
1094 LDLM_DEBUG(lock, "converted waiting lock");
1097 req->rq_status = EDEADLOCK;
1102 if (!req->rq_status)
1103 ldlm_reprocess_all(lock->l_resource);
1104 LDLM_DEBUG(lock, "server-side convert handler END");
1105 LDLM_LOCK_PUT(lock);
1107 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1112 int ldlm_handle_convert(struct ptlrpc_request *req)
1115 struct ldlm_request *dlm_req;
1117 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof *dlm_req,
1118 lustre_swab_ldlm_request);
1119 if (dlm_req != NULL) {
1120 rc = ldlm_handle_convert0(req, dlm_req);
1122 CERROR ("Can't unpack dlm_req\n");
1128 /* Cancel all the locks whose handles are packed into the ldlm_request */
1129 int ldlm_request_cancel(struct ptlrpc_request *req,
1130 const struct ldlm_request *dlm_req, int first)
1132 struct ldlm_resource *res, *pres = NULL;
1133 struct ldlm_lock *lock;
1134 int i, count, done = 0;
1137 count = dlm_req->lock_count ? dlm_req->lock_count : 1;
1141 /* There is no lock on the server at replay time, so skip
1142 * lock cancelling to let the replay tests pass. */
1143 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1146 LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks",
1148 for (i = first; i < count; i++) {
1149 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1151 LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1152 "lock (cookie "LPU64")",
1153 dlm_req->lock_handle[i].cookie);
1157 res = lock->l_resource;
1159 ldlm_lock_cancel(lock);
1160 if (ldlm_del_waiting_lock(lock))
1161 CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
1165 if (pres->lr_namespace->ns_lvbo &&
1166 pres->lr_namespace->ns_lvbo->lvbo_update) {
1167 (void)pres->lr_namespace->ns_lvbo->
1168 lvbo_update(pres, NULL, 0, 1);
1170 ldlm_reprocess_all(pres);
1171 ldlm_resource_putref(pres);
1174 ldlm_resource_getref(res);
1177 LDLM_LOCK_PUT(lock);
1180 if (pres->lr_namespace->ns_lvbo &&
1181 pres->lr_namespace->ns_lvbo->lvbo_update) {
1182 (void)pres->lr_namespace->ns_lvbo->
1183 lvbo_update(pres, NULL, 0, 1);
1185 ldlm_reprocess_all(pres);
1186 ldlm_resource_putref(pres);
1188 LDLM_DEBUG_NOLOCK("server-side cancel handler END");
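/*
 * Editorial note on the loop above (not in the original source): "pres" tracks
 * the previously seen resource so that the LVB update and ldlm_reprocess_all()
 * are issued once per run of handles sharing a resource rather than once per
 * cancelled lock; the trailing block repeats the same flush for the last
 * resource once the loop ends.
 */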
1192 int ldlm_handle_cancel(struct ptlrpc_request *req)
1194 struct ldlm_request *dlm_req;
1198 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1199 lustre_swab_ldlm_request);
1200 if (dlm_req == NULL) {
1201 CERROR("bad request buffer for cancel\n");
1205 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1206 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1207 LDLM_CANCEL - LDLM_FIRST_OPC);
1209 rc = lustre_pack_reply(req, 1, NULL, NULL);
1211 CERROR("out of memory\n");
1215 if (!ldlm_request_cancel(req, dlm_req, 0))
1216 req->rq_status = ESTALE;
1218 if (ptlrpc_reply(req) != 0)
1224 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1225 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1230 LDLM_DEBUG(lock, "client blocking AST callback handler START");
1232 lock_res_and_lock(lock);
1233 lock->l_flags |= LDLM_FL_CBPENDING;
1235 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1236 lock->l_flags |= LDLM_FL_CANCEL;
1238 do_ast = (!lock->l_readers && !lock->l_writers);
1239 unlock_res_and_lock(lock);
1242 LDLM_DEBUG(lock, "already unused, calling "
1243 "callback (%p)", lock->l_blocking_ast);
1244 if (lock->l_blocking_ast != NULL)
1245 lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1248 LDLM_DEBUG(lock, "Lock still has references, will be"
1249 " cancelled later");
1252 LDLM_DEBUG(lock, "client blocking callback handler END");
1253 LDLM_LOCK_PUT(lock);
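/*
 * Editorial recap (hedged): a client-side blocking callback only marks the
 * lock LDLM_FL_CBPENDING (plus LDLM_FL_CANCEL for cancel-on-block locks).
 * The blocking AST runs here only when the lock already has no readers or
 * writers; otherwise cancellation is deferred until the last reference is
 * dropped via ldlm_lock_decref().
 */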
1257 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1258 struct ldlm_namespace *ns,
1259 struct ldlm_request *dlm_req,
1260 struct ldlm_lock *lock)
1262 CFS_LIST_HEAD(ast_list);
1265 LDLM_DEBUG(lock, "client completion callback handler START");
1267 lock_res_and_lock(lock);
1269 /* If we receive the completion AST before the actual enqueue returned,
1270 * then we might need to switch lock modes, resources, or extents. */
1271 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1272 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1273 LDLM_DEBUG(lock, "completion AST, new lock mode");
1276 if (lock->l_resource->lr_type != LDLM_PLAIN) {
1277 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
1278 LDLM_DEBUG(lock, "completion AST, new policy data");
1281 ldlm_resource_unlink_lock(lock);
1282 if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1283 &lock->l_resource->lr_name,
1284 sizeof(lock->l_resource->lr_name)) != 0) {
1285 unlock_res_and_lock(lock);
1286 ldlm_lock_change_resource(ns, lock,
1287 &dlm_req->lock_desc.l_resource.lr_name);
1288 LDLM_DEBUG(lock, "completion AST, new resource");
1289 CERROR("change resource!\n");
1290 lock_res_and_lock(lock);
1293 if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1294 lock->l_flags |= LDLM_FL_CBPENDING;
1295 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1298 if (lock->l_lvb_len) {
1300 lvb = lustre_swab_reqbuf(req, DLM_REQ_REC_OFF, lock->l_lvb_len,
1301 lock->l_lvb_swabber);
1303 LDLM_ERROR(lock, "completion AST did not contain "
1306 memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1310 ldlm_grant_lock(lock, &ast_list);
1311 unlock_res_and_lock(lock);
1313 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1315 ldlm_run_cp_ast_work(&ast_list);
1317 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1319 LDLM_LOCK_PUT(lock);
1323 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1324 struct ldlm_namespace *ns,
1325 struct ldlm_request *dlm_req,
1326 struct ldlm_lock *lock)
1331 LDLM_DEBUG(lock, "client glimpse AST callback handler");
1333 if (lock->l_glimpse_ast != NULL)
1334 rc = lock->l_glimpse_ast(lock, req);
1336 if (req->rq_repmsg != NULL) {
1339 req->rq_status = rc;
1343 lock_res_and_lock(lock);
1344 if (lock->l_granted_mode == LCK_PW &&
1345 !lock->l_readers && !lock->l_writers &&
1346 cfs_time_after(cfs_time_current(),
1347 cfs_time_add(lock->l_last_used,
1348 cfs_time_seconds(10)))) {
1349 unlock_res_and_lock(lock);
1350 if (ldlm_bl_to_thread(ns, NULL, lock, 0))
1351 ldlm_handle_bl_callback(ns, NULL, lock);
1356 unlock_res_and_lock(lock);
1357 LDLM_LOCK_PUT(lock);
1361 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1363 req->rq_status = rc;
1364 if (req->rq_reply_state == NULL) {
1365 rc = lustre_pack_reply(req, 1, NULL, NULL);
1369 return ptlrpc_reply(req);
1372 int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1373 struct ldlm_lock *lock, int flags)
1376 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1377 struct ldlm_bl_work_item *blwi;
1380 OBD_ALLOC(blwi, sizeof(*blwi));
1386 blwi->blwi_ld = *ld;
1387 blwi->blwi_lock = lock;
1388 blwi->blwi_flags = flags;
1390 spin_lock(&blp->blp_lock);
1391 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1392 cfs_waitq_signal(&blp->blp_waitq);
1393 spin_unlock(&blp->blp_lock);
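/*
 * Editorial note (rationale hedged): the work item merely queues the blocking
 * callback for the ldlm_bl_* thread pool, so the potentially slow handling
 * (flushing dirty pages, sending the cancel RPC) happens outside the ptlrpc
 * callback service thread.  Callers fall back to handling the callback inline
 * when queuing fails, as the ldlm_bl_to_thread() call sites elsewhere in this
 * file show.
 */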
1401 static int ldlm_callback_handler(struct ptlrpc_request *req)
1403 struct ldlm_namespace *ns;
1404 struct ldlm_request *dlm_req;
1405 struct ldlm_lock *lock;
1409 /* Requests arrive in sender's byte order. The ptlrpc service
1410 * handler has already checked and, if necessary, byte-swapped the
1411 * incoming request message body, but I am responsible for the
1412 * message buffers. */
1414 if (req->rq_export == NULL) {
1415 struct ldlm_request *dlm_req;
1417 CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
1418 "export cookie "LPX64"; this is "
1419 "normal if this node rebooted with a lock held\n",
1420 lustre_msg_get_opc(req->rq_reqmsg),
1421 libcfs_id2str(req->rq_peer),
1422 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1424 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1426 lustre_swab_ldlm_request);
1427 if (dlm_req != NULL)
1428 CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
1429 dlm_req->lock_handle[0].cookie);
1431 ldlm_callback_reply(req, -ENOTCONN);
1435 LASSERT(req->rq_export != NULL);
1436 LASSERT(req->rq_export->exp_obd != NULL);
1438 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1439 case LDLM_BL_CALLBACK:
1440 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1442 case LDLM_CP_CALLBACK:
1443 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
1445 case LDLM_GL_CALLBACK:
1446 OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
1448 case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1449 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1450 rc = llog_origin_handle_cancel(req);
1451 ldlm_callback_reply(req, rc);
1453 case OBD_QC_CALLBACK:
1454 OBD_FAIL_RETURN(OBD_FAIL_OBD_QC_CALLBACK_NET, 0);
1455 rc = target_handle_qc_callback(req);
1456 ldlm_callback_reply(req, rc);
1460 /* reply in handler */
1461 rc = target_handle_dqacq_callback(req);
1463 case LLOG_ORIGIN_HANDLE_CREATE:
1464 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1465 rc = llog_origin_handle_create(req);
1466 ldlm_callback_reply(req, rc);
1468 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1469 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1470 rc = llog_origin_handle_next_block(req);
1471 ldlm_callback_reply(req, rc);
1473 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1474 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1475 rc = llog_origin_handle_read_header(req);
1476 ldlm_callback_reply(req, rc);
1478 case LLOG_ORIGIN_HANDLE_CLOSE:
1479 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1480 rc = llog_origin_handle_close(req);
1481 ldlm_callback_reply(req, rc);
1484 CERROR("unknown opcode %u\n",
1485 lustre_msg_get_opc(req->rq_reqmsg));
1486 ldlm_callback_reply(req, -EPROTO);
1490 ns = req->rq_export->exp_obd->obd_namespace;
1491 LASSERT(ns != NULL);
1493 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1494 lustre_swab_ldlm_request);
1495 if (dlm_req == NULL) {
1496 CERROR ("can't unpack dlm_req\n");
1497 ldlm_callback_reply(req, -EPROTO);
1501 lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle[0]);
1503 CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
1504 dlm_req->lock_handle[0].cookie);
1505 ldlm_callback_reply(req, -EINVAL);
1509 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
1510 lock_res_and_lock(lock);
1511 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
1512 unlock_res_and_lock(lock);
1514 /* We want the ost thread to get this reply so that it can respond
1515 * to ost requests (write cache writeback) that might be triggered
1518 * But we'd also like to be able to indicate in the reply that we're
1519 * cancelling right now, because it's unused, or have an intent result
1520 * in the reply, so we might have to push the responsibility for sending
1521 * the reply down into the AST handlers, alas. */
1523 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1524 case LDLM_BL_CALLBACK:
1525 CDEBUG(D_INODE, "blocking ast\n");
1526 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK))
1527 ldlm_callback_reply(req, 0);
1528 if (ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock, 0))
1529 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
1531 case LDLM_CP_CALLBACK:
1532 CDEBUG(D_INODE, "completion ast\n");
1533 ldlm_callback_reply(req, 0);
1534 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
1536 case LDLM_GL_CALLBACK:
1537 CDEBUG(D_INODE, "glimpse ast\n");
1538 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
1541 LBUG(); /* checked above */
1547 static int ldlm_cancel_handler(struct ptlrpc_request *req)
1552 /* Requests arrive in sender's byte order. The ptlrpc service
1553 * handler has already checked and, if necessary, byte-swapped the
1554 * incoming request message body, but I am responsible for the
1555 * message buffers. */
1557 if (req->rq_export == NULL) {
1558 struct ldlm_request *dlm_req;
1560 CERROR("operation %d from %s with bad export cookie "LPU64"\n",
1561 lustre_msg_get_opc(req->rq_reqmsg),
1562 libcfs_id2str(req->rq_peer),
1563 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1565 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1567 lustre_swab_ldlm_request);
1568 if (dlm_req != NULL)
1569 ldlm_lock_dump_handle(D_ERROR,
1570 &dlm_req->lock_handle[0]);
1571 ldlm_callback_reply(req, -ENOTCONN);
1575 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1577 /* XXX FIXME move this back to mds/handler.c, bug 249 */
1579 CDEBUG(D_INODE, "cancel\n");
1580 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
1581 rc = ldlm_handle_cancel(req);
1585 case OBD_LOG_CANCEL:
1586 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1587 rc = llog_origin_handle_cancel(req);
1588 ldlm_callback_reply(req, rc);
1591 CERROR("invalid opcode %d\n",
1592 lustre_msg_get_opc(req->rq_reqmsg));
1593 ldlm_callback_reply(req, -EINVAL);
1599 void ldlm_revoke_export_locks(struct obd_export *exp)
1601 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
1602 struct list_head rpc_list;
1603 struct ldlm_lock *lock, *next;
1604 struct ldlm_lock_desc desc;
1607 INIT_LIST_HEAD(&rpc_list);
1609 spin_lock(&exp->exp_ldlm_data.led_lock);
1610 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
1611 lock_res_and_lock(lock);
1613 if (lock->l_req_mode != lock->l_granted_mode) {
1614 unlock_res_and_lock(lock);
1618 LASSERT(lock->l_resource);
1619 if (lock->l_resource->lr_type != LDLM_IBITS &&
1620 lock->l_resource->lr_type != LDLM_PLAIN) {
1621 unlock_res_and_lock(lock);
1625 if (lock->l_flags & LDLM_FL_AST_SENT) {
1626 unlock_res_and_lock(lock);
1630 LASSERT(lock->l_blocking_ast);
1631 LASSERT(!lock->l_blocking_lock);
1633 lock->l_flags |= LDLM_FL_AST_SENT;
1634 list_move(&lock->l_export_chain, &rpc_list);
1636 unlock_res_and_lock(lock);
1638 spin_unlock(&exp->exp_ldlm_data.led_lock);
1640 while (!list_empty(&rpc_list)) {
1641 lock = list_entry(rpc_list.next, struct ldlm_lock,
1643 list_del_init(&lock->l_export_chain);
1645 /* the desc just pretends to be exclusive */
1646 ldlm_lock2desc(lock, &desc);
1647 desc.l_req_mode = LCK_EX;
1648 desc.l_granted_mode = 0;
1650 LDLM_LOCK_GET(lock);
1651 lock->l_blocking_ast(lock, &desc, lock->l_ast_data,
1653 LDLM_LOCK_PUT(lock);
1659 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
1661 struct ldlm_bl_work_item *blwi = NULL;
1663 spin_lock(&blp->blp_lock);
1664 if (!list_empty(&blp->blp_list)) {
1665 blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
1667 list_del(&blwi->blwi_entry);
1669 spin_unlock(&blp->blp_lock);
1674 struct ldlm_bl_thread_data {
1676 struct ldlm_bl_pool *bltd_blp;
1679 static int ldlm_bl_thread_main(void *arg)
1681 struct ldlm_bl_thread_data *bltd = arg;
1682 struct ldlm_bl_pool *blp = bltd->bltd_blp;
1686 char name[CFS_CURPROC_COMM_MAX];
1687 snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
1689 cfs_daemonize(name);
1692 atomic_inc(&blp->blp_num_threads);
1693 complete(&blp->blp_comp);
1696 struct l_wait_info lwi = { 0 };
1697 struct ldlm_bl_work_item *blwi = NULL;
1699 l_wait_event_exclusive(blp->blp_waitq,
1700 (blwi = ldlm_bl_get_work(blp)) != NULL,
1703 if (blwi->blwi_ns == NULL)
1706 if (blwi->blwi_flags == LDLM_FL_CANCELING) {
1707 /* Special case: when we cancel locks in the LRU
1708 * asynchronously, the lock is first removed from
1709 * l_bl_ast explicitly in ldlm_cancel_lru before it is
1710 * sent to this thread. Thus the lock is marked
1711 * LDLM_FL_CANCELING and has already been cancelled locally. */
1712 CFS_LIST_HEAD(head);
1713 LASSERT(list_empty(&blwi->blwi_lock->l_bl_ast));
1714 list_add(&blwi->blwi_lock->l_bl_ast, &head);
1715 ldlm_cli_cancel_req(blwi->blwi_lock->l_conn_export,
1717 LDLM_LOCK_PUT(blwi->blwi_lock);
1719 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
1722 OBD_FREE(blwi, sizeof(*blwi));
1725 atomic_dec(&blp->blp_num_threads);
1726 complete(&blp->blp_comp);
1732 static int ldlm_setup(void);
1733 static int ldlm_cleanup(int force);
1735 int ldlm_get_ref(void)
1739 mutex_down(&ldlm_ref_sem);
1740 if (++ldlm_refcount == 1) {
1745 mutex_up(&ldlm_ref_sem);
1750 void ldlm_put_ref(int force)
1753 mutex_down(&ldlm_ref_sem);
1754 if (ldlm_refcount == 1) {
1755 int rc = ldlm_cleanup(force);
1757 CERROR("ldlm_cleanup failed: %d\n", rc);
1763 mutex_up(&ldlm_ref_sem);
1768 static int ldlm_setup(void)
1770 struct ldlm_bl_pool *blp;
1777 if (ldlm_state != NULL)
1780 OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
1781 if (ldlm_state == NULL)
1785 rc = ldlm_proc_setup();
1790 ldlm_state->ldlm_cb_service =
1791 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1792 LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
1793 LDLM_CB_REPLY_PORTAL, ldlm_timeout * 900,
1794 ldlm_callback_handler, "ldlm_cbd",
1795 ldlm_svc_proc_dir, NULL,
1796 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1798 LCT_MD_THREAD|LCT_DT_THREAD);
1800 if (!ldlm_state->ldlm_cb_service) {
1801 CERROR("failed to start service\n");
1802 GOTO(out_proc, rc = -ENOMEM);
1805 ldlm_state->ldlm_cancel_service =
1806 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1807 LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
1808 LDLM_CANCEL_REPLY_PORTAL, ldlm_timeout * 6000,
1809 ldlm_cancel_handler, "ldlm_canceld",
1810 ldlm_svc_proc_dir, NULL,
1811 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1813 LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD);
1815 if (!ldlm_state->ldlm_cancel_service) {
1816 CERROR("failed to start service\n");
1817 GOTO(out_proc, rc = -ENOMEM);
1820 OBD_ALLOC(blp, sizeof(*blp));
1822 GOTO(out_proc, rc = -ENOMEM);
1823 ldlm_state->ldlm_bl_pool = blp;
1825 atomic_set(&blp->blp_num_threads, 0);
1826 cfs_waitq_init(&blp->blp_waitq);
1827 spin_lock_init(&blp->blp_lock);
1829 CFS_INIT_LIST_HEAD(&blp->blp_list);
1832 for (i = 0; i < LDLM_BL_THREADS; i++) {
1833 struct ldlm_bl_thread_data bltd = {
1837 init_completion(&blp->blp_comp);
1838 rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
1840 CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
1841 GOTO(out_thread, rc);
1843 wait_for_completion(&blp->blp_comp);
1846 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cancel_service);
1848 GOTO(out_thread, rc);
1850 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cb_service);
1852 GOTO(out_thread, rc);
1854 CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
1855 expired_lock_thread.elt_state = ELT_STOPPED;
1856 cfs_waitq_init(&expired_lock_thread.elt_waitq);
1858 CFS_INIT_LIST_HEAD(&waiting_locks_list);
1859 spin_lock_init(&waiting_locks_spinlock);
1860 cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
1862 rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
1864 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
1865 GOTO(out_thread, rc);
1868 wait_event(expired_lock_thread.elt_waitq,
1869 expired_lock_thread.elt_state == ELT_READY);
1876 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1877 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1882 ldlm_proc_cleanup();
1885 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
1890 static int ldlm_cleanup(int force)
1893 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1897 if (!list_empty(&ldlm_namespace_list)) {
1898 CERROR("ldlm still has namespaces; clean these up first.\n");
1899 ldlm_dump_all_namespaces(D_DLMTRACE);
1904 while (atomic_read(&blp->blp_num_threads) > 0) {
1905 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
1907 init_completion(&blp->blp_comp);
1909 spin_lock(&blp->blp_lock);
1910 list_add_tail(&blwi.blwi_entry, &blp->blp_list);
1911 cfs_waitq_signal(&blp->blp_waitq);
1912 spin_unlock(&blp->blp_lock);
1914 wait_for_completion(&blp->blp_comp);
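/*
 * Editorial note (hedged): each dummy work item with blwi_ns == NULL queued
 * above causes exactly one blocking thread to exit (see the blwi_ns check in
 * ldlm_bl_thread_main), and blp_comp lets us wait for that thread to finish
 * before the pool is freed.
 */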
1916 OBD_FREE(blp, sizeof(*blp));
1918 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1919 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1920 ldlm_proc_cleanup();
1922 expired_lock_thread.elt_state = ELT_TERMINATE;
1923 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
1924 wait_event(expired_lock_thread.elt_waitq,
1925 expired_lock_thread.elt_state == ELT_STOPPED);
1927 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1928 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1931 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
1937 int __init ldlm_init(void)
1939 init_mutex(&ldlm_ref_sem);
1940 init_mutex(&ldlm_namespace_lock);
1941 ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
1942 sizeof(struct ldlm_resource), 0,
1943 SLAB_HWCACHE_ALIGN);
1944 if (ldlm_resource_slab == NULL)
1947 ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
1948 sizeof(struct ldlm_lock), 0,
1949 SLAB_HWCACHE_ALIGN);
1950 if (ldlm_lock_slab == NULL) {
1951 cfs_mem_cache_destroy(ldlm_resource_slab);
1958 void __exit ldlm_exit(void)
1962 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
1963 rc = cfs_mem_cache_destroy(ldlm_resource_slab);
1964 LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
1965 rc = cfs_mem_cache_destroy(ldlm_lock_slab);
1966 LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
1970 EXPORT_SYMBOL(ldlm_extent_shift_kms);
1973 EXPORT_SYMBOL(ldlm_get_processing_policy);
1974 EXPORT_SYMBOL(ldlm_lock2desc);
1975 EXPORT_SYMBOL(ldlm_register_intent);
1976 EXPORT_SYMBOL(ldlm_lockname);
1977 EXPORT_SYMBOL(ldlm_typename);
1978 EXPORT_SYMBOL(ldlm_lock2handle);
1979 EXPORT_SYMBOL(__ldlm_handle2lock);
1980 EXPORT_SYMBOL(ldlm_lock_get);
1981 EXPORT_SYMBOL(ldlm_lock_put);
1982 EXPORT_SYMBOL(ldlm_lock_match);
1983 EXPORT_SYMBOL(ldlm_lock_cancel);
1984 EXPORT_SYMBOL(ldlm_lock_addref);
1985 EXPORT_SYMBOL(ldlm_lock_decref);
1986 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
1987 EXPORT_SYMBOL(ldlm_lock_change_resource);
1988 EXPORT_SYMBOL(ldlm_lock_set_data);
1989 EXPORT_SYMBOL(ldlm_it2str);
1990 EXPORT_SYMBOL(ldlm_lock_dump);
1991 EXPORT_SYMBOL(ldlm_lock_dump_handle);
1992 EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
1993 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
1994 EXPORT_SYMBOL(ldlm_lock_allow_match);
1996 /* ldlm_request.c */
1997 EXPORT_SYMBOL(ldlm_completion_ast);
1998 EXPORT_SYMBOL(ldlm_blocking_ast);
1999 EXPORT_SYMBOL(ldlm_glimpse_ast);
2000 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2001 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2002 EXPORT_SYMBOL(ldlm_cli_convert);
2003 EXPORT_SYMBOL(ldlm_cli_enqueue);
2004 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2005 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2006 EXPORT_SYMBOL(ldlm_cli_cancel);
2007 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2008 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2009 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2010 EXPORT_SYMBOL(ldlm_cli_join_lru);
2011 EXPORT_SYMBOL(ldlm_replay_locks);
2012 EXPORT_SYMBOL(ldlm_resource_foreach);
2013 EXPORT_SYMBOL(ldlm_namespace_foreach);
2014 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
2015 EXPORT_SYMBOL(ldlm_resource_iterate);
2016 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2017 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2020 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2021 EXPORT_SYMBOL(ldlm_server_completion_ast);
2022 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2023 EXPORT_SYMBOL(ldlm_handle_enqueue);
2024 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2025 EXPORT_SYMBOL(ldlm_handle_cancel);
2026 EXPORT_SYMBOL(ldlm_request_cancel);
2027 EXPORT_SYMBOL(ldlm_handle_convert);
2028 EXPORT_SYMBOL(ldlm_handle_convert0);
2029 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2030 EXPORT_SYMBOL(ldlm_get_ref);
2031 EXPORT_SYMBOL(ldlm_put_ref);
2032 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2033 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2035 /* ldlm_resource.c */
2036 EXPORT_SYMBOL(ldlm_namespace_new);
2037 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2038 EXPORT_SYMBOL(ldlm_namespace_free);
2039 EXPORT_SYMBOL(ldlm_namespace_dump);
2040 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2041 EXPORT_SYMBOL(ldlm_resource_get);
2042 EXPORT_SYMBOL(ldlm_resource_putref);
2043 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
2046 EXPORT_SYMBOL(client_import_add_conn);
2047 EXPORT_SYMBOL(client_import_del_conn);
2048 EXPORT_SYMBOL(client_obd_setup);
2049 EXPORT_SYMBOL(client_obd_cleanup);
2050 EXPORT_SYMBOL(client_connect_import);
2051 EXPORT_SYMBOL(client_disconnect_export);
2052 EXPORT_SYMBOL(target_start_recovery_thread);
2053 EXPORT_SYMBOL(target_stop_recovery_thread);
2054 EXPORT_SYMBOL(target_handle_connect);
2055 EXPORT_SYMBOL(target_cleanup_recovery);
2056 EXPORT_SYMBOL(target_destroy_export);
2057 EXPORT_SYMBOL(target_cancel_recovery_timer);
2058 EXPORT_SYMBOL(target_send_reply);
2059 EXPORT_SYMBOL(target_queue_recovery_request);
2060 EXPORT_SYMBOL(target_handle_ping);
2061 EXPORT_SYMBOL(target_handle_disconnect);
2064 EXPORT_SYMBOL(lock_res_and_lock);
2065 EXPORT_SYMBOL(unlock_res_and_lock);