/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <portals/list.h>
#include "ldlm_internal.h"

extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;

extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);

static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount = 0;

static struct ldlm_state *ldlm_state;

inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}

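/* Worked example (illustrative only, assuming HZ=100): round_timeout(250)
 * returns 300 jiffies, i.e. the deadline is rounded up to the next whole
 * second.  Nearby expiries therefore collapse onto a single timer shot
 * instead of rearming waiting_locks_timer for every individual lock. */
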
#ifdef __KERNEL__
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;

static struct expired_lock_thread {
        wait_queue_head_t         elt_waitq;
        int                       elt_state;
        struct list_head          elt_expired_locks;
        spinlock_t                elt_lock;
} expired_lock_thread;

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

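/* Expired-lock thread lifecycle: ldlm_setup() spawns the thread in
 * ELT_STOPPED, expired_lock_main() advertises ELT_READY once it is running,
 * and ldlm_cleanup() requests ELT_TERMINATE and then waits for the thread
 * to report ELT_STOPPED again before freeing the LDLM state. */
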
struct ldlm_bl_pool {
        spinlock_t              blp_lock;
        struct list_head        blp_list;
        wait_queue_head_t       blp_waitq;
        atomic_t                blp_num_threads;
        struct completion       blp_comp;
};

struct ldlm_bl_work_item {
        struct list_head        blwi_entry;
        struct ldlm_namespace   *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock        *blwi_lock;
};

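/* Blocking-AST work items form a simple producer/consumer queue:
 * ldlm_bl_to_thread() allocates a blwi and appends it to blp_list under
 * blp_lock, then wakes one thread sleeping in ldlm_bl_thread_main(), which
 * runs the callback and frees the item.  A blwi with blwi_ns == NULL is
 * the shutdown sentinel queued by ldlm_cleanup(). */
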
static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        RETURN(need_to_run);
}

static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;
        ENTRY;

        lock_kernel();
        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = list_entry(expired->next, struct ldlm_lock,
                                          l_pending_chain);
                        if ((void *)lock < LP_POISON + PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                CERROR("free lock on elt list %p\n", lock);
                                /* Corrupt entry: stop scanning the list. */
                                break;
                        }
                        list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                continue;
                        }
                        export = class_export_get(lock->l_export);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(export);
                        class_export_put(export);
                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}

static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                if ((lock->l_callback_timeout > jiffies) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid %s",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left on the list.
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        spin_unlock_bh(&waiting_locks_spinlock);
}

/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
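/* Timeout arithmetic (illustrative only, assuming the default obd_timeout
 * of 100 seconds): a lock added here must be cancelled within
 * obd_timeout/2 = 50 seconds, after which waiting_locks_callback() hands
 * it to the expired-lock thread and the client is evicted. */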
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                return 0;
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        return 1;
}

/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list; adjust the timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more locks pending; just cancel the timer. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        return 1;
}

#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */

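/* The three ldlm_server_*_ast() functions below implement the server side
 * of the callback protocol: each packs an ldlm_request, sends it on the
 * reverse import to the client holding the lock, and funnels any error
 * through ldlm_handle_ast_error(), which decides between cancelling the
 * lock (normal races, liblustre clients) and evicting the client. */
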
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                            const char *ast_type)
{
        struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
               ", mode %s: evicting client %s@%s NID %s\n",
               ast_type, rc,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1],
               ldlm_lockname[lock->l_granted_mode],
               lock->l_export->exp_client_uuid.uuid,
               conn->c_remote_uuid.uuid,
               ptlrpc_peernid2str(&conn->c_peer, str));
        ptlrpc_fail_export(lock->l_export);
}

static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        char str[PTL_NALFMT_SIZE];

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timed out, just cancelling lock", ast_type,
                                   ptlrpc_peernid2str(&req->rq_peer, str));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        ldlm_del_waiting_lock(lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL)
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                                   " from %s AST - normal race",
                                   ptlrpc_peernid2str(&req->rq_peer, str),
                                   req->rq_repmsg->status, ast_type);
                else
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST",
                                   ptlrpc_peernid2str(&req->rq_peer, str),
                                   (req->rq_repmsg != NULL) ?
                                   req->rq_repmsg->status : 0, ast_type);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}

int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERT(lock);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* This blocking AST will be communicated as part of the
                 * completion AST instead. */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

#if 0
        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }
#endif

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_BL_CALLBACK, 1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2-second timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "blocking");

        ptlrpc_req_finished(req);

        RETURN(rc);
}

/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}

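/* Example (illustrative): timeval_sub of {tv_sec = 5, tv_usec = 200000}
 * and {tv_sec = 3, tv_usec = 900000} is 2 * 1000000 + (-700000) =
 * 1300000us; the result is a signed microsecond difference, and the usec
 * term may be negative on its own. */
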
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size[2] = {sizeof(*body)}, buffers = 1;
        ENTRY;

        LASSERT(lock != NULL);

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        if (lock->l_resource->lr_lvb_len) {
                buffers = 2;
                size[1] = lock->l_resource->lr_lvb_len;
        }

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_CP_CALLBACK, buffers, size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        if (buffers == 2) {
                void *lvb = lustre_msg_buf(req->rq_reqmsg, 1,
                                           lock->l_resource->lr_lvb_len);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2-second timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted. */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "completion");

        ptlrpc_req_finished(req);

        RETURN(rc);
}

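/* Note the ordering above: LDLM_FL_AST_SENT is copied into the completion
 * AST so the client learns that a blocking AST is already implied, and
 * ldlm_add_waiting_lock() starts the eviction clock at the same moment,
 * while ns_lock is still held. */
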
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_GL_CALLBACK, 1, &size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        ldlm_lock2desc(lock, &body->lock_desc);

        size = lock->l_resource->lr_lvb_len;
        req->rq_replen = lustre_msg_size(1, &size);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2-second timeout for initial AST reply */

        rc = ptlrpc_queue_wait(req);
        if (rc == -ELDLM_NO_LOCK_DATA)
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
        else if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
        else
                rc = res->lr_namespace->ns_lvbo->lvbo_update
                        (res, req->rq_repmsg, 0, 1);

        ptlrpc_req_finished(req);
        RETURN(rc);
}

int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc = 0, size[2] = {sizeof(*dlm_rep)}, buffers = 1;
        int flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                GOTO(out, rc = -EFAULT);
        }

        flags = dlm_req->lock_flags;

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace, &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback,
                                glimpse_callback, NULL, 0);
        if (!lock)
                GOTO(out, rc = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        lock->l_export = class_export_get(req->rq_export);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                if (lock->l_resource->lr_lvb_len) {
                        buffers = 2;
                        size[1] = lock->l_resource->lr_lvb_len;
                }

                if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = lustre_pack_reply(req, buffers, size, NULL);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(ldlm_policy_data_t));
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &lock->l_policy_data.l_extent,
                       sizeof(lock->l_req_extent));

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, &flags);
        if (err)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now. */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;
        if (req->rq_reply_state == NULL) {
                err = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc == 0)
                        rc = err;
                req->rq_status = rc;
        }

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock != NULL) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d, rc=%d)", err, rc);

                if (lock->l_resource->lr_lvb_len > 0 && rc == 0) {
                        void *lvb = lustre_msg_buf(req->rq_repmsg, 1,
                                                  lock->l_resource->lr_lvb_len);
                        LASSERT(lvb != NULL);
                        memcpy(lvb, lock->l_resource->lr_lvb_data,
                               lock->l_resource->lr_lvb_len);
                }

                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }
        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
                          lock, rc);

        return rc;
}

int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                LDLM_DEBUG(lock, "server-side convert handler START");
                ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                  &dlm_rep->lock_flags);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}

int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from client %s nid %s\n",
                       dlm_req->lock_handle1.cookie,
                       req->rq_export->exp_client_uuid.uuid,
                       ptlrpc_peernid2str(&req->rq_peer, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPU64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                res = lock->l_resource;
                if (res && res->lr_namespace->ns_lvbo &&
                    res->lr_namespace->ns_lvbo->lvbo_update) {
                        (void)res->lr_namespace->ns_lvbo->lvbo_update
                                (res, NULL, 0, 0);
                                //(res, req->rq_reqmsg, 1);
                }

                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                req->rq_status = rc;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}

void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "lock still has references, will be "
                           "cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));
                LDLM_DEBUG(lock, "completion AST, new policy data");
        }

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                          dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        if (lock->l_lvb_len) {
                void *lvb;
                lvb = lustre_swab_reqbuf(req, 1, lock->l_lvb_len,
                                         lock->l_lvb_swabber);
                if (lvb == NULL) {
                        LDLM_ERROR(lock, "completion AST did not contain "
                                   "expected LVB!");
                } else {
                        memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
                }
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);

        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}

static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL) {
                l_unlock(&ns->ns_lock);
                l_check_no_ns_lock(ns);
                rc = lock->l_glimpse_ast(lock, req);
                l_lock(&ns->ns_lock);
        }

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            time_after(jiffies, lock->l_last_used + 10 * HZ)) {
                l_unlock(&ns->ns_lock);
                ldlm_handle_bl_callback(ns, NULL, lock);
                EXIT;
                return;
        }

        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        if (req->rq_reply_state == NULL) {
                rc = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc)
                        return rc;
        }
        return ptlrpc_reply(req);
}

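/* ldlm_callback_reply() stashes rc in rq_status and packs an empty reply
 * if the handler has not already packed one, so every callback opcode can
 * answer the client with a one-liner regardless of where it failed. */
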
int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                      struct ldlm_lock *lock)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        if (ld != NULL)
                blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
#else
        RETURN(-ENOSYS);
#endif
}

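/* In the liblustre (!__KERNEL__) build there is no thread pool, so this
 * returns -ENOSYS and ldlm_callback_handler() runs the blocking AST inline
 * via ldlm_handle_bl_callback() instead of queueing it. */
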
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from nid %s with bad "
                       "export cookie "LPX64" (ptl req %d/rep %d); this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc,
                       ptlrpc_peernid2str(&req->rq_peer, str),
                       req->rq_reqmsg->handle.cookie,
                       req->rq_request_portal, req->rq_reply_portal);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch(req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
                break;
        case LDLM_CP_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
                break;
        case LDLM_GL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
                break;
        case OBD_LOG_CANCEL:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
                rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CREATE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_create(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("unknown opcode %u\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("can't unpack dlm_req\n");
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
                       dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
#ifdef __KERNEL__
                rc = ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock);
                ldlm_callback_reply(req, rc);
#else
                rc = 0;
                ldlm_callback_reply(req, rc);
                ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
#endif
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG(); /* checked above */
        }

        RETURN(0);
}

static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
                       req->rq_reqmsg->opc, req->rq_request_portal,
                       req->rq_reply_portal);
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                RETURN(-ENOTCONN);
        }

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);
        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                RETURN(-EINVAL);
        }

        RETURN(0);
}

#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}

struct ldlm_bl_thread_data {
        int                     bltd_num;
        struct ldlm_bl_pool     *bltd_blp;
};

static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}

#endif /* __KERNEL__ */

static int ldlm_setup(void);
static int ldlm_cleanup(int force);

int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        EXIT;
}

static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm_state != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)
                RETURN(-ENOMEM);

#ifdef __KERNEL__
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);
#endif

        ldlm_state->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
                                ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);

        if (!ldlm_state->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm_state->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);

        if (!ldlm_state->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc)
                GOTO(out_thread, rc);

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc)
                GOTO(out_thread, rc);

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#endif

 out_proc:
#ifdef __KERNEL__
        ldlm_proc_cleanup();
 out_free:
#endif
        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;
        RETURN(rc);
}

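/* ldlm_cleanup() below unwinds ldlm_setup() in reverse: the blocking-AST
 * threads are told to exit first (one NULL-namespace sentinel per thread),
 * then the two ptlrpc services are stopped and unregistered, and finally
 * the expired-lock thread is terminated before the state is freed. */
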
static int ldlm_cleanup(int force)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
#endif
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces();
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);
#endif

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}

int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}

void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        if (kmem_cache_destroy(ldlm_resource_slab) != 0)
                CERROR("couldn't free ldlm resource slab\n");
        if (kmem_cache_destroy(ldlm_lock_slab) != 0)
                CERROR("couldn't free ldlm lock slab\n");
}

/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_abort_recovery);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);