/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif
#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <libcfs/list.h>
#include "ldlm_internal.h"
extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;
static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount;

/* LDLM state */

static struct ldlm_state *ldlm_state;
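/* round_timeout() rounds a jiffies value up to the next whole second, so
 * that the waiting-locks timer fires at most once per second even when
 * many lock timeouts land close together.  For example, with HZ=100 a
 * timeout of 250 jiffies (2.5s) rounds to 300 jiffies (3s). */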
static inline unsigned long
round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}
#ifdef __KERNEL__
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;
static struct expired_lock_thread {
        wait_queue_head_t         elt_waitq;
        int                       elt_state;
        struct list_head          elt_expired_locks;
        spinlock_t                elt_lock;
} expired_lock_thread;
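/* Locks whose callback timers have expired are handed to this dedicated
 * thread rather than being evicted from timer (softirq) context:
 * ptlrpc_fail_export() can block, so it must run in process context. */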
#if !defined(ENOTSUPP)
# define ENOTSUPP 524
#endif

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2
struct ldlm_bl_pool {
        spinlock_t              blp_lock;
        struct list_head        blp_list;
        wait_queue_head_t       blp_waitq;
        atomic_t                blp_num_threads;
        struct completion       blp_comp;
};
struct ldlm_bl_work_item {
        struct list_head        blwi_entry;
        struct ldlm_namespace   *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock        *blwi_lock;
};
static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        RETURN(need_to_run);
}
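/* Body of the dedicated eviction thread: wait until the timer callback
 * queues an expired lock (or we are asked to terminate), then evict the
 * client that failed to answer its blocking AST. */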
static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;
        ENTRY;

        lock_kernel();
        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = list_entry(expired->next, struct ldlm_lock,
                                          l_pending_chain);
                        if ((void *)lock < LP_POISON + PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                CERROR("free lock on elt list %p\n", lock);
                                break;
                        }
                        list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                continue;
                        }
                        export = class_export_get(lock->l_export);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(export);
                        class_export_put(export);
                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}
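/* Called from timer (softirq) context, so it must not schedule: expired
 * locks are only moved onto the expired list here; the eviction itself
 * happens in expired_lock_main() above. */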
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        if (obd_dump_on_timeout)
                portals_debug_dumplog();

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                if ((lock->l_callback_timeout > jiffies) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                if (ptlrpc_check_suspend()) {
                        /* there is a case when we talk to one mds, holding
                         * lock from another mds. this way we easily can get
                         * here, if second mds is being recovered. so, we
                         * suspend timeouts. bug 6019 */

                        LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str));

                        list_del_init(&lock->l_pending_chain);
                        spin_unlock_bh(&waiting_locks_spinlock);
                        ldlm_add_waiting_lock(lock);
                        spin_lock_bh(&waiting_locks_spinlock);
                        continue;
                }

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid %s ",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        spin_unlock_bh(&waiting_locks_spinlock);
}
/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                return 0;
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        return 1;
}
/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        return 1;
}
#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */
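/* Common path for giving up on a client that failed to answer (or errored
 * on) an AST: log it, optionally dump the debug log, and evict the export. */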
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, const char *ast_type)
{
        struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        LDLM_ERROR(lock, "%s AST failed (%d): evicting client %s@%s NID "LPX64
                   " (%s)", ast_type, rc, lock->l_export->exp_client_uuid.uuid,
                   conn->c_remote_uuid.uuid, conn->c_peer.peer_id.nid,
                   ptlrpc_peernid2str(&conn->c_peer, str));

        if (obd_dump_on_timeout)
                portals_debug_dumplog();
        ptlrpc_fail_export(lock->l_export);
}
static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        struct ptlrpc_peer *peer = &req->rq_import->imp_connection->c_peer;
        char str[PTL_NALFMT_SIZE];

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   ptlrpc_peernid2str(peer, str));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        l_lock(&lock->l_resource->lr_namespace->ns_lock);
                        ldlm_del_waiting_lock(lock);
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL)
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                                   " from %s AST - normal race",
                                   ptlrpc_peernid2str(peer, str),
                                   req->rq_repmsg->status, ast_type);
                else
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", ptlrpc_peernid2str(peer, str),
                                   (req->rq_repmsg != NULL) ?
                                   req->rq_repmsg->status : 0, ast_type);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}
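/* Server-side blocking AST: ask the client (via the reverse import) to
 * cancel its lock.  Before the RPC is sent the lock goes on the
 * waiting-locks list, so the client is evicted if it never answers. */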
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERTF(lock->l_export->exp_obd->obd_recovering == 0,
                 "BUG 6063: lock collide during recovery");

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30) {
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK,
                              1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = ldlm_timeout; /* timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "blocking");

        ptlrpc_req_finished(req);

        RETURN(rc);
}
/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size[2] = {sizeof(*body)}, buffers = 1;
        ENTRY;

        LASSERT(lock != NULL);

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        down(&lock->l_resource->lr_lvb_sem);
        if (lock->l_resource->lr_lvb_len) {
                buffers = 2;
                size[1] = lock->l_resource->lr_lvb_len;
        }
        up(&lock->l_resource->lr_lvb_sem);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK,
                              buffers, size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        if (buffers == 2) {
                void *lvb;

                down(&lock->l_resource->lr_lvb_sem);
                lvb = lustre_msg_buf(req->rq_reqmsg, 1,
                                     lock->l_resource->lr_lvb_len);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
                up(&lock->l_resource->lr_lvb_sem);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = ldlm_timeout; /* timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "completion");

        ptlrpc_req_finished(req);

        RETURN(rc);
}
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK,
                              1, &size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        ldlm_lock2desc(lock, &body->lock_desc);

        down(&lock->l_resource->lr_lvb_sem);
        size = lock->l_resource->lr_lvb_len;
        up(&lock->l_resource->lr_lvb_sem);
        req->rq_replen = lustre_msg_size(1, &size);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        rc = ptlrpc_queue_wait(req);
        if (rc == -ELDLM_NO_LOCK_DATA)
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
        else if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
        else
                rc = res->lr_namespace->ns_lvbo->lvbo_update
                        (res, req->rq_repmsg, 0, 1);
        ptlrpc_req_finished(req);
        RETURN(rc);
}
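/* Look up a lock by the client's (remote) handle among the locks already
 * granted to an export; used on replay so a resent enqueue finds the
 * original lock instead of creating a duplicate. */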
static struct ldlm_lock *
find_existing_lock(struct obd_export *exp, struct lustre_handle *remote_hdl)
{
        struct obd_device *obd = exp->exp_obd;
        struct list_head *iter;

        l_lock(&obd->obd_namespace->ns_lock);
        list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
                struct ldlm_lock *lock;
                lock = list_entry(iter, struct ldlm_lock, l_export_chain);
                if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
                        LDLM_LOCK_GET(lock);
                        l_unlock(&obd->obd_namespace->ns_lock);
                        return lock;
                }
        }
        l_unlock(&obd->obd_namespace->ns_lock);
        return NULL;
}
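/* Main entry point for server-side lock enqueues: unpack the request,
 * create (or, on replay, find) the lock, call ldlm_lock_enqueue() for the
 * actual grant/block decision, and pack the reply, including the LVB if
 * the resource has one. */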
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc = 0, size[2] = {sizeof(*dlm_rep)};
        __u32 flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf (req, MDS_REQ_INTENT_LOCKREQ_OFF,
                                      sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");
                GOTO(out, rc = -EFAULT);
        }

        flags = dlm_req->lock_flags;

        LASSERT(req->rq_export);

        if (flags & LDLM_FL_REPLAY) {
                lock = find_existing_lock(req->rq_export,
                                          &dlm_req->lock_handle1);
                if (lock != NULL) {
                        DEBUG_REQ(D_HA, req, "found existing lock cookie "LPX64,
                                  lock->l_handle.h_cookie);
                        GOTO(existing_lock, rc = 0);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace, &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback,
                                glimpse_callback, NULL, 0);
        if (!lock)
                GOTO(out, rc = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (req->rq_export->exp_failed) {
                LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                GOTO(out, err = -ENOTCONN);
        }
        lock->l_export = class_export_get(req->rq_export);

        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

existing_lock:

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                int buffers = 1;

                down(&lock->l_resource->lr_lvb_sem);
                if (lock->l_resource->lr_lvb_len) {
                        size[1] = lock->l_resource->lr_lvb_len;
                        buffers = 2;
                }
                up(&lock->l_resource->lr_lvb_sem);
                if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = lustre_pack_reply(req, buffers, size, NULL);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(ldlm_policy_data_t));
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &lock->l_policy_data.l_extent,
                       sizeof(lock->l_req_extent));

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock,
                                cookie, (int *)&flags);
        if (err)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;
        if (req->rq_reply_state == NULL) {
                err = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc == 0)
                        rc = err;
                req->rq_status = rc;
        }

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d, rc=%d)", err, rc);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                if (rc == 0) {
                        down(&lock->l_resource->lr_lvb_sem);
                        size[1] = lock->l_resource->lr_lvb_len;
                        if (size[1] > 0) {
                                void *lvb = lustre_msg_buf(req->rq_repmsg,
                                                           1, size[1]);
                                LASSERTF(lvb != NULL, "req %p, lock %p\n",
                                         req, lock);

                                memcpy(lvb, lock->l_resource->lr_lvb_data,
                                       size[1]);
                        }
                        up(&lock->l_resource->lr_lvb_sem);
                } else {
                        ldlm_lock_destroy(lock);
                }

                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }

        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
                          lock, rc);

        return rc;
}
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                void *res;

                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side convert handler START");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                        (int *)&dlm_rep->lock_flags);
                if (res) {
                        l_lock(&lock->l_resource->lr_namespace->ns_lock);
                        if (ldlm_del_waiting_lock(lock))
                                CDEBUG(D_DLMTRACE,
                                       "converted waiting lock %p\n", lock);
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        req->rq_status = 0;
                } else {
                        req->rq_status = EDEADLOCK;
                }
        }

        if (lock) {
                if (!req->rq_status)
                        ldlm_reprocess_all(lock->l_resource);
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side convert handler END");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}
int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from client %s id %s\n",
                       dlm_req->lock_handle1.cookie,
                       req->rq_export->exp_client_uuid.uuid,
                       ptlrpc_peernid2str(&req->rq_peer, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPX64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                res = lock->l_resource;
                if (res && res->lr_namespace->ns_lvbo &&
                    res->lr_namespace->ns_lvbo->lvbo_update) {
                        (void)res->lr_namespace->ns_lvbo->lvbo_update
                                (res, NULL, 0, 0);
                                //(res, req->rq_reqmsg, 1);
                }

                l_lock(&res->lr_namespace->ns_lock);
                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                l_unlock(&res->lr_namespace->ns_lock);
                req->rq_status = rc;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}
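/* The handlers below run on the client side, processing ASTs sent by the
 * server.  ldlm_handle_bl_callback() reacts to a blocking AST: if the lock
 * is no longer in use it calls the lock's blocking callback right away;
 * otherwise the cancel is deferred until the last reference is dropped. */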
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be"
                           " cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));
                LDLM_DEBUG(lock, "completion AST, new policy data");
        }

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                          dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        if (lock->l_lvb_len) {
                void *lvb;
                lvb = lustre_swab_reqbuf(req, 1, lock->l_lvb_len,
                                         lock->l_lvb_swabber);
                if (lvb == NULL) {
                        LDLM_ERROR(lock, "completion AST did not contain "
                                   "expected LVB!");
                } else {
                        memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
                }
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);

        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL) {
                l_unlock(&ns->ns_lock);
                l_check_no_ns_lock(ns);
                rc = lock->l_glimpse_ast(lock, req);
                l_lock(&ns->ns_lock);
        }

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        l_unlock(&ns->ns_lock);
        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            time_after(jiffies, lock->l_last_used + 10 * HZ)) {
                if (ldlm_bl_to_thread(ns, NULL, lock))
                        ldlm_handle_bl_callback(ns, NULL, lock);
                EXIT;
                return;
        }

        LDLM_LOCK_PUT(lock);
        EXIT;
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        if (req->rq_reply_state == NULL) {
                rc = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc)
                        return rc;
        }
        return ptlrpc_reply(req);
}
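/* Queue a blocking AST for the dedicated ldlm_bl_* threads instead of
 * handling it in the ptlrpc service thread, so the callback (which may do
 * I/O, e.g. write-cache writeback) cannot stall the service.  Returns
 * non-zero if there is no thread pool (liblustre) or the work item cannot
 * be allocated; the caller then handles the AST synchronously. */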
int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                      struct ldlm_lock *lock)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        if (ld != NULL)
                blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
#else
        RETURN(-ENOSYS);
#endif
}
static int ldlm_msg_check_version(struct lustre_msg *msg)
{
        int rc;

        switch (msg->opc) {
        case LDLM_ENQUEUE:
        case LDLM_CONVERT:
        case LDLM_CANCEL:
        case LDLM_BL_CALLBACK:
        case LDLM_CP_CALLBACK:
        case LDLM_GL_CALLBACK:
                rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               msg->opc, msg->version, LUSTRE_DLM_VERSION);
                break;
        case OBD_LOG_CANCEL:
        case LLOG_ORIGIN_HANDLE_OPEN:
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
        case LLOG_ORIGIN_HANDLE_CLOSE:
        case LLOG_CATINFO:
                rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               msg->opc, msg->version, LUSTRE_LOG_VERSION);
                break;
        default:
                CERROR("LDLM unknown opcode %d\n", msg->opc);
                rc = -ENOTSUPP;
                break;
        }

        return rc;
}
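/* Main handler for the ldlm_cbd service: dispatches incoming blocking,
 * completion, and glimpse ASTs (and the llog requests that share this
 * portal) to the routines above. */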
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        rc = ldlm_msg_check_version(req->rq_reqmsg);
        if (rc) {
                CERROR("LDLM_CB drop mal-formed request\n");
                RETURN(rc);
        }

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
                       "export cookie "LPX64"; this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc,
                       ptlrpc_peernid2str(&req->rq_peer, str),
                       req->rq_reqmsg->handle.cookie);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
                break;
        case LDLM_CP_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
                break;
        case LDLM_GL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
                break;
        case OBD_LOG_CANCEL:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
                rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_OPEN:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_open(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_prev_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("unknown opcode %u\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("can't unpack dlm_req\n");
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                LDLM_DEBUG_NOLOCK("callback on lock "LPX64" - lock "
                                  "disappeared\n", dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
                ldlm_callback_reply(req, 0);
                if (ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock))
                        ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG(); /* checked above */
        }

        RETURN(0);
}
static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        rc = ldlm_msg_check_version(req->rq_reqmsg);
        if (rc) {
                CERROR("LDLM_CL drop mal-formed request\n");
                RETURN(rc);
        }

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                char str[PTL_NALFMT_SIZE];

                CERROR("operation %d with bad export from %s\n",
                       req->rq_reqmsg->opc,
                       ptlrpc_peernid2str(&req->rq_peer, str));
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        switch (req->rq_reqmsg->opc) {
        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);
        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EINVAL);
        }

        RETURN(0);
}
#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}
struct ldlm_bl_thread_data {
        int                     bltd_num;
        struct ldlm_bl_pool     *bltd_blp;
};

static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}
#endif
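/* ldlm_get_ref()/ldlm_put_ref() reference-count the global LDLM state:
 * the first taker runs ldlm_setup(), the last one (or a forced put) runs
 * ldlm_cleanup().  A minimal usage sketch for a hypothetical caller:
 *
 *      rc = ldlm_get_ref();
 *      if (rc != 0)
 *              return rc;
 *      ...use the lock manager...
 *      ldlm_put_ref(0);
 */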
static int ldlm_setup(void);
static int ldlm_cleanup(int force);

int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);
        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);
        EXIT;
}
static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm_state != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)
                RETURN(-ENOMEM);

#ifdef __KERNEL__
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);
#endif

        ldlm_state->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
                                1500, ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);
        if (!ldlm_state->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm_state->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL, 30000,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);
        if (!ldlm_state->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);
        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc)
                GOTO(out_thread, rc);

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc)
                GOTO(out_thread, rc);

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#endif

 out_proc:
#ifdef __KERNEL__
        ldlm_proc_cleanup();
 out_free:
#endif
        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;
        RETURN(rc);
}
static int ldlm_cleanup(int force)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
#endif
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces(D_DLMTRACE);
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);
#else
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}
int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}

void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        LASSERTF(kmem_cache_destroy(ldlm_resource_slab) == 0,
                 "couldn't free ldlm resource slab\n");
        LASSERTF(kmem_cache_destroy(ldlm_lock_slab) == 0,
                 "couldn't free ldlm lock slab\n");
}
/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

#if 0
/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);
#endif

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_import_add_conn);
EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_start_recovery_thread);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);