/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <libcfs/list.h>
#include "ldlm_internal.h"

extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;

static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount;

static struct ldlm_state *ldlm_state;

inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}
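
/*
 * Illustrative example, assuming HZ == 100: a 2.5-second timeout of
 * 250 jiffies rounds up to the next whole second, and exact second
 * boundaries round up as well:
 *
 *         round_timeout(250) == 300    (3.0 seconds)
 *         round_timeout(300) == 400    (4.0 seconds)
 */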

/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;

static struct expired_lock_thread {
        wait_queue_head_t         elt_waitq;
        int                       elt_state;
        struct list_head          elt_expired_locks;
        spinlock_t                elt_lock;
} expired_lock_thread;

#if !defined(ENOTSUPP)
#define ENOTSUPP 524
#endif

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

struct ldlm_bl_pool {
        spinlock_t              blp_lock;
        struct list_head        blp_list;
        wait_queue_head_t       blp_waitq;
        atomic_t                blp_num_threads;
        struct completion       blp_comp;
};

struct ldlm_bl_work_item {
        struct list_head        blwi_entry;
        struct ldlm_namespace   *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock        *blwi_lock;
};

#ifdef __KERNEL__
static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        RETURN(need_to_run);
}

static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;
        ENTRY;

        lock_kernel();
        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = list_entry(expired->next, struct ldlm_lock,
                                          l_pending_chain);
                        if ((void *)lock < LP_POISON + PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                CERROR("free lock on elt list %p\n", lock);
                                LBUG();
                        }
                        list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                continue;
                        }
                        export = class_export_get(lock->l_export);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(export);
                        class_export_put(export);
                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}

static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        if (obd_dump_on_timeout)
                portals_debug_dumplog();

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                if ((lock->l_callback_timeout > jiffies) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid %s",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        spin_unlock_bh(&waiting_locks_spinlock);
}

/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                RETURN(0);
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        RETURN(1);
}
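
/*
 * Illustrative note, assuming the default obd_timeout of 100 seconds: a
 * lock added above must be cancelled within obd_timeout * HZ / 2, i.e.
 * 50 seconds (rounded up to the next whole second by round_timeout()),
 * before waiting_locks_callback() moves it to the expired-lock list and
 * the expired-lock thread evicts its client.
 */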

/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                RETURN(0);
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                RETURN(0);
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        RETURN(1);
}

#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */

static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, const char *ast_type)
{
        struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        LDLM_ERROR(lock, "%s AST failed (%d): evicting client %s@%s NID "LPX64
                   " (%s)", ast_type, rc, lock->l_export->exp_client_uuid.uuid,
                   conn->c_remote_uuid.uuid, conn->c_peer.peer_id.nid,
                   ptlrpc_peernid2str(&conn->c_peer, str));

        if (obd_dump_on_timeout)
                portals_debug_dumplog();
        ptlrpc_fail_export(lock->l_export);
}

static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        struct ptlrpc_peer *peer = &req->rq_import->imp_connection->c_peer;
        char str[PTL_NALFMT_SIZE];

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   ptlrpc_peernid2str(peer, str));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        l_lock(&lock->l_resource->lr_namespace->ns_lock);
                        ldlm_del_waiting_lock(lock);
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL)
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                                   " from %s AST - normal race",
                                   ptlrpc_peernid2str(peer, str),
                                   req->rq_repmsg->status, ast_type);
                else
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", ptlrpc_peernid2str(peer, str),
                                   (req->rq_repmsg != NULL) ?
                                   req->rq_repmsg->status : 0, ast_type);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}

int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERT(lock);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

#if 0
        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }
#endif

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK,
                              1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = ldlm_timeout; /* timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "blocking");

        ptlrpc_req_finished(req);

        RETURN(rc);
}

/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}
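
/*
 * Illustrative example: for large = { .tv_sec = 5, .tv_usec = 100 } and
 * small = { .tv_sec = 3, .tv_usec = 400 }, the result is
 * (5 - 3) * 1000000 + (100 - 400) == 1999700 microseconds.
 */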

int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size[2] = {sizeof(*body)}, buffers = 1;
        ENTRY;

        LASSERT(lock != NULL);

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        down(&lock->l_resource->lr_lvb_sem);
        if (lock->l_resource->lr_lvb_len) {
                size[1] = lock->l_resource->lr_lvb_len;
                buffers = 2;
        }
        up(&lock->l_resource->lr_lvb_sem);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK,
                              buffers, size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        if (buffers == 2) {
                void *lvb;

                down(&lock->l_resource->lr_lvb_sem);
                lvb = lustre_msg_buf(req->rq_reqmsg, 1,
                                     lock->l_resource->lr_lvb_len);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
                up(&lock->l_resource->lr_lvb_sem);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = ldlm_timeout; /* timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "completion");

        ptlrpc_req_finished(req);

        RETURN(rc);
}

int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK,
                              1, &size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        ldlm_lock2desc(lock, &body->lock_desc);

        down(&lock->l_resource->lr_lvb_sem);
        size = lock->l_resource->lr_lvb_len;
        up(&lock->l_resource->lr_lvb_sem);
        req->rq_replen = lustre_msg_size(1, &size);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        rc = ptlrpc_queue_wait(req);
        if (rc == -ELDLM_NO_LOCK_DATA)
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
        else if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
        else
                rc = res->lr_namespace->ns_lvbo->lvbo_update
                        (res, req->rq_repmsg, 0, 1);

        ptlrpc_req_finished(req);
        RETURN(rc);
}

static struct ldlm_lock *
find_existing_lock(struct obd_export *exp, struct lustre_handle *remote_hdl)
{
        struct obd_device *obd = exp->exp_obd;
        struct list_head *iter;

        l_lock(&obd->obd_namespace->ns_lock);
        list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
                struct ldlm_lock *lock;
                lock = list_entry(iter, struct ldlm_lock, l_export_chain);
                if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
                        LDLM_LOCK_GET(lock);
                        l_unlock(&obd->obd_namespace->ns_lock);
                        return lock;
                }
        }
        l_unlock(&obd->obd_namespace->ns_lock);
        return NULL;
}

int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc = 0, size[2] = {sizeof(*dlm_rep)}, buffers = 1;
        int flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf(req, MDS_REQ_INTENT_LOCKREQ_OFF,
                                     sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                GOTO(out, rc = -EFAULT);
        }

        flags = dlm_req->lock_flags;

        LASSERT(req->rq_export);

        if (flags & LDLM_FL_REPLAY) {
                lock = find_existing_lock(req->rq_export,
                                          &dlm_req->lock_handle1);
                if (lock != NULL) {
                        DEBUG_REQ(D_HA, req, "found existing lock cookie "LPX64,
                                  lock->l_handle.h_cookie);
                        GOTO(existing_lock, rc = 0);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace, &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback,
                                glimpse_callback, NULL, 0);
        if (!lock)
                GOTO(out, rc = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (req->rq_export->exp_failed) {
                LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                GOTO(out, err = -ENOTCONN);
        }
        lock->l_export = class_export_get(req->rq_export);

        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

 existing_lock:

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                down(&lock->l_resource->lr_lvb_sem);
                if (lock->l_resource->lr_lvb_len) {
                        size[1] = lock->l_resource->lr_lvb_len;
                        buffers = 2;
                }
                up(&lock->l_resource->lr_lvb_sem);

                if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = lustre_pack_reply(req, buffers, size, NULL);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(ldlm_policy_data_t));
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &lock->l_policy_data.l_extent,
                       sizeof(lock->l_req_extent));

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, &flags);
        if (err)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;
        if (req->rq_reply_state == NULL) {
                err = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc == 0)
                        rc = err;
                req->rq_status = rc;
        }

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d, rc=%d)", err, rc);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                if (rc == 0) {
                        down(&lock->l_resource->lr_lvb_sem);
                        size[1] = lock->l_resource->lr_lvb_len;
                        if (size[1] > 0) {
                                void *lvb = lustre_msg_buf(req->rq_repmsg,
                                                           1, size[1]);
                                LASSERTF(lvb != NULL, "req %p, lock %p\n",
                                         req, lock);

                                memcpy(lvb, lock->l_resource->lr_lvb_data,
                                       size[1]);
                        }
                        up(&lock->l_resource->lr_lvb_sem);
                } else {
                        ldlm_lock_destroy(lock);
                }

                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }

        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
                          lock, rc);

        return rc;
}

int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                void *res;

                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side convert handler START");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                        &dlm_rep->lock_flags);
                if (res) {
                        l_lock(&lock->l_resource->lr_namespace->ns_lock);
                        if (ldlm_del_waiting_lock(lock))
                                CDEBUG(D_DLMTRACE, "converted waiting lock %p\n",
                                       lock);
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        req->rq_status = 0;
                } else {
                        req->rq_status = EDEADLOCK;
                }
        }

        if (lock) {
                if (!req->rq_status)
                        ldlm_reprocess_all(lock->l_resource);
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side convert handler END");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}

int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from client %s id %s\n",
                       dlm_req->lock_handle1.cookie,
                       req->rq_export->exp_client_uuid.uuid,
                       ptlrpc_peernid2str(&req->rq_peer, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPX64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                res = lock->l_resource;
                if (res && res->lr_namespace->ns_lvbo &&
                    res->lr_namespace->ns_lvbo->lvbo_update) {
                        (void)res->lr_namespace->ns_lvbo->lvbo_update
                                (res, NULL, 0, 0);
                                //(res, req->rq_reqmsg, 1);
                }

                l_lock(&res->lr_namespace->ns_lock);
                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                l_unlock(&res->lr_namespace->ns_lock);
                req->rq_status = rc;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}

void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be "
                           "cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));
                LDLM_DEBUG(lock, "completion AST, new policy data");
        }

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                          dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        if (lock->l_lvb_len) {
                void *lvb;
                lvb = lustre_swab_reqbuf(req, 1, lock->l_lvb_len,
                                         lock->l_lvb_swabber);
                if (lvb == NULL) {
                        LDLM_ERROR(lock, "completion AST did not contain "
                                   "expected LVB!");
                } else {
                        memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
                }
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);

        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}

static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL) {
                l_unlock(&ns->ns_lock);
                l_check_no_ns_lock(ns);
                rc = lock->l_glimpse_ast(lock, req);
                l_lock(&ns->ns_lock);
        }

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        l_unlock(&ns->ns_lock);
        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            time_after(jiffies, lock->l_last_used + 10 * HZ)) {
                if (ldlm_bl_to_thread(ns, NULL, lock))
                        ldlm_handle_bl_callback(ns, NULL, lock);
                EXIT;
                return;
        }

        LDLM_LOCK_PUT(lock);
        EXIT;
}

static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        if (req->rq_reply_state == NULL) {
                rc = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc)
                        return rc;
        }
        return ptlrpc_reply(req);
}
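
/*
 * Note (illustrative): the callback handlers reply with ldlm_callback_reply()
 * before doing the real AST work, e.g.
 *
 *         ldlm_callback_reply(req, 0);
 *         ldlm_handle_cp_callback(req, ns, dlm_req, lock);
 *
 * so the server thread that sent the AST is not blocked on client-side
 * processing; see the longer comment in ldlm_callback_handler() below.
 */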

int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                      struct ldlm_lock *lock)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        if (ld != NULL)
                blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
#else
        RETURN(-ENOSYS);
#endif
}
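
/*
 * Illustrative usage (a sketch based on the handlers below): callers queue
 * blocking-AST work to the thread pool and fall back to handling it inline
 * if queueing fails:
 *
 *         if (ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock))
 *                 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
 *
 * ldlm_bl_thread_main() dequeues the work item via ldlm_bl_get_work() and
 * frees it once the callback has run.
 */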

static int ldlm_msg_check_version(struct lustre_msg *msg)
{
        int rc;

        switch (msg->opc) {
        case LDLM_ENQUEUE:
        case LDLM_CONVERT:
        case LDLM_CANCEL:
        case LDLM_BL_CALLBACK:
        case LDLM_CP_CALLBACK:
        case LDLM_GL_CALLBACK:
                rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               msg->opc, msg->version, LUSTRE_DLM_VERSION);
                break;
        case OBD_LOG_CANCEL:
        case LLOG_ORIGIN_HANDLE_OPEN:
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
        case LLOG_ORIGIN_HANDLE_CLOSE:
                rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               msg->opc, msg->version, LUSTRE_LOG_VERSION);
                break;
        default:
                CERROR("LDLM unknown opcode %d\n", msg->opc);
                rc = -ENOTSUPP;
                break;
        }

        return rc;
}

static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        int rc;
        ENTRY;

        rc = ldlm_msg_check_version(req->rq_reqmsg);
        if (rc) {
                CERROR("LDLM_CB drop mal-formed request\n");
                RETURN(0);
        }

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                char str[PTL_NALFMT_SIZE];

                CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
                       "export cookie "LPX64"; this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc,
                       ptlrpc_peernid2str(&req->rq_peer, str),
                       req->rq_reqmsg->handle.cookie);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
                break;
        case LDLM_CP_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
                break;
        case LDLM_GL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
                break;
        case OBD_LOG_CANCEL:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
                rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_OPEN:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_open(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_prev_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("unknown opcode %u\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("can't unpack dlm_req\n");
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                LDLM_DEBUG_NOLOCK("callback on lock "LPX64" - lock "
                                  "disappeared\n", dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
                ldlm_callback_reply(req, 0);
                if (ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock))
                        ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG(); /* checked above */
        }

        RETURN(0);
}

static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        rc = ldlm_msg_check_version(req->rq_reqmsg);
        if (rc) {
                CERROR("LDLM_CL drop mal-formed request\n");
                RETURN(0);
        }

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                char str[PTL_NALFMT_SIZE];

                CERROR("operation %d with bad export from %s\n",
                       req->rq_reqmsg->opc,
                       ptlrpc_peernid2str(&req->rq_peer, str));
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);
        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EINVAL);
        }

        RETURN(0);
}

#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}

struct ldlm_bl_thread_data {
        int                     bltd_num;
        struct ldlm_bl_pool     *bltd_blp;
};

static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}
#endif

static int ldlm_setup(void);
static int ldlm_cleanup(int force);

int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        EXIT;
}

static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm_state != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)
                RETURN(-ENOMEM);

#ifdef __KERNEL__
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);
#endif

        ldlm_state->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
                                1500, ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);

        if (!ldlm_state->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm_state->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL, 30000,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);

        if (!ldlm_state->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc)
                GOTO(out_thread, rc);

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc)
                GOTO(out_thread, rc);

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#endif

 out_proc:
#ifdef __KERNEL__
        ldlm_proc_cleanup();
 out_free:
#endif
        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;
        RETURN(rc);
}

static int ldlm_cleanup(int force)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
#endif
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces(D_DLMTRACE);
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);
#else
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}

int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}

void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        LASSERTF(kmem_cache_destroy(ldlm_resource_slab) == 0,
                 "couldn't free ldlm resource slab\n");
        LASSERTF(kmem_cache_destroy(ldlm_lock_slab) == 0,
                 "couldn't free ldlm lock slab\n");
}

/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

#if 0
/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);
#endif

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_import_add_conn);
EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_start_recovery_thread);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);