/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif
#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <portals/list.h>
#include "ldlm_internal.h"
extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;
extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);
static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount = 0;

/* LDLM state */
static struct ldlm_state *ldlm_state;
inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}
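/*
 * Example: with HZ == 100, round_timeout(150) == 200 and
 * round_timeout(200) == 300; the result is always rounded up to the
 * next whole second, so nearby lock timeouts coalesce into a single
 * timer firing.
 */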
#ifdef __KERNEL__
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;
static struct expired_lock_thread {
        wait_queue_head_t elt_waitq;
        int elt_state;
        struct list_head elt_expired_locks;
        spinlock_t elt_lock;
} expired_lock_thread;

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2
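/*
 * elt_state lifecycle: ldlm_setup() initializes it to ELT_STOPPED and
 * starts the thread; expired_lock_main() advances it to ELT_READY;
 * ldlm_cleanup() sets ELT_TERMINATE and waits for the thread to drop
 * back to ELT_STOPPED.
 */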
struct ldlm_bl_pool {
        spinlock_t blp_lock;
        struct list_head blp_list;
        wait_queue_head_t blp_waitq;
        atomic_t blp_num_threads;
        struct completion blp_comp;
};

struct ldlm_bl_work_item {
        struct list_head blwi_entry;
        struct ldlm_namespace *blwi_ns;
        struct ldlm_lock_desc blwi_ld;
        struct ldlm_lock *blwi_lock;
};
static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        RETURN(need_to_run);
}
static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;
        ENTRY;

        lock_kernel();
        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = list_entry(expired->next, struct ldlm_lock,
                                          l_pending_chain);
                        if ((void *)lock < LP_POISON + PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                CERROR("free lock on elt list %p\n", lock);
                                break;
                        }
                        list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                continue;
                        }
                        export = class_export_get(lock->l_export);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(export);
                        class_export_put(export);
                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                if ((lock->l_callback_timeout > jiffies) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid %s",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        spin_unlock_bh(&waiting_locks_spinlock);
}
/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                return 0;
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        return 1;
}
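/*
 * Typical pairing, sketched from the callers below (the blocking and
 * completion AST senders, and the cancel/convert handlers):
 *
 *      if (lock->l_granted_mode == lock->l_req_mode)
 *              ldlm_add_waiting_lock(lock);    // start the eviction clock
 *      ...AST round-trip...
 *      if (ldlm_del_waiting_lock(lock))        // client answered in time
 *              CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
 */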
/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        return 1;
}
#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, const char *ast_type)
{
        struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        LDLM_ERROR(lock, "%s AST failed (%d): evicting client %s@%s NID "LPX64
                   " (%s)", ast_type, rc, lock->l_export->exp_client_uuid.uuid,
                   conn->c_remote_uuid.uuid, conn->c_peer.peer_id.nid,
                   ptlrpc_peernid2str(&conn->c_peer, str));
        ptlrpc_fail_export(lock->l_export);
}
static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        struct ptlrpc_peer *peer = &req->rq_import->imp_connection->c_peer;
        char str[PTL_NALFMT_SIZE];

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   ptlrpc_peernid2str(peer, str));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        ldlm_del_waiting_lock(lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL)
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                                   " from %s AST - normal race",
                                   ptlrpc_peernid2str(peer, str),
                                   req->rq_repmsg->status, ast_type);
                else
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", ptlrpc_peernid2str(peer, str),
                                   (req->rq_repmsg != NULL) ?
                                   req->rq_repmsg->status : 0, ast_type);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERT(lock != NULL);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

#if 0
        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }
#endif

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK,
                              1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "blocking");

        ptlrpc_req_finished(req);

        RETURN(rc);
}
/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}
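/*
 * Example: for large = {.tv_sec = 5, .tv_usec = 250000} and
 * small = {.tv_sec = 3, .tv_usec = 750000}, timeval_sub() returns
 * 1500000, i.e. 1.5 seconds in microseconds; the caller below divides
 * by 1000000 to compare against obd_timeout, which is in seconds.
 */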
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size[2] = {sizeof(*body)}, buffers = 1;
        ENTRY;

        LASSERT(lock != NULL);

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        if (lock->l_resource->lr_lvb_len) {
                buffers = 2;
                size[1] = lock->l_resource->lr_lvb_len;
        }

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK,
                              buffers, size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        if (buffers == 2) {
                void *lvb = lustre_msg_buf(req->rq_reqmsg, 1,
                                           lock->l_resource->lr_lvb_len);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "completion");

        ptlrpc_req_finished(req);

        RETURN(rc);
}
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK,
                              1, &size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        ldlm_lock2desc(lock, &body->lock_desc);

        size = lock->l_resource->lr_lvb_len;
        req->rq_replen = lustre_msg_size(1, &size);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        rc = ptlrpc_queue_wait(req);
        if (rc == -ELDLM_NO_LOCK_DATA)
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
        else if (rc != 0)
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
        else
                rc = res->lr_namespace->ns_lvbo->lvbo_update
                        (res, req->rq_repmsg, 0, 1);
        ptlrpc_req_finished(req);
        RETURN(rc);
}
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc = 0, size[2] = {sizeof(*dlm_rep)};
        int flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf(req, MDS_REQ_INTENT_LOCKREQ_OFF,
                                     sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                GOTO(out, rc = -EFAULT);
        }

        flags = dlm_req->lock_flags;

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace, &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback,
                                glimpse_callback, NULL, 0);
        if (!lock)
                GOTO(out, rc = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        lock->l_export = class_export_get(req->rq_export);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                int buffers = 1;
                if (lock->l_resource->lr_lvb_len) {
                        size[1] = lock->l_resource->lr_lvb_len;
                        buffers = 2;
                }

                if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = lustre_pack_reply(req, buffers, size, NULL);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(ldlm_policy_data_t));
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &lock->l_policy_data.l_extent,
                       sizeof(lock->l_req_extent));

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, &flags);
        if (err)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;
        if (req->rq_reply_state == NULL) {
                err = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc == 0)
                        rc = err;
                req->rq_status = rc;
        }

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d, rc=%d)", err, rc);

                if (lock->l_resource->lr_lvb_len > 0 && rc == 0) {
                        void *lvb = lustre_msg_buf(req->rq_repmsg, 1,
                                                   lock->l_resource->lr_lvb_len);
                        LASSERT(lvb != NULL);
                        memcpy(lvb, lock->l_resource->lr_lvb_data,
                               lock->l_resource->lr_lvb_len);
                }

                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }
        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
                          lock, rc);

        return rc;
}
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                LDLM_DEBUG(lock, "server-side convert handler START");
                ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                  &dlm_rep->lock_flags);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}
int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from client %s nid %s\n",
                       dlm_req->lock_handle1.cookie,
                       req->rq_export->exp_client_uuid.uuid,
                       ptlrpc_peernid2str(&req->rq_peer, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPU64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                res = lock->l_resource;
                if (res && res->lr_namespace->ns_lvbo &&
                    res->lr_namespace->ns_lvbo->lvbo_update) {
                        (void)res->lr_namespace->ns_lvbo->lvbo_update
                                (res, NULL, 0, 0);
                                //(res, req->rq_reqmsg, 1);
                }

                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                req->rq_status = rc;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be"
                           " cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));
                LDLM_DEBUG(lock, "completion AST, new policy data");
        }

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                          dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        if (lock->l_lvb_len) {
                void *lvb;
                lvb = lustre_swab_reqbuf(req, 1, lock->l_lvb_len,
                                         lock->l_lvb_swabber);
                if (lvb == NULL) {
                        LDLM_ERROR(lock, "completion AST did not contain "
                                   "expected LVB!");
                } else {
                        memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
                }
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);

        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL) {
                l_unlock(&ns->ns_lock);
                l_check_no_ns_lock(ns);
                rc = lock->l_glimpse_ast(lock, req);
                l_lock(&ns->ns_lock);
        }

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            time_after(jiffies, lock->l_last_used + 10 * HZ)) {
#ifdef __KERNEL__
                ldlm_bl_to_thread(ns, NULL, lock);
                l_unlock(&ns->ns_lock);
#else
                l_unlock(&ns->ns_lock);
                ldlm_handle_bl_callback(ns, NULL, lock);
#endif
                EXIT;
                return;
        }

        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        if (req->rq_reply_state == NULL) {
                rc = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc)
                        return rc;
        }
        return ptlrpc_reply(req);
}
int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                      struct ldlm_lock *lock)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        if (ld != NULL)
                blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
#else
        RETURN(-ENOSYS);
#endif
}
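/*
 * This is the producer half of a simple work queue: ldlm_bl_get_work()
 * and ldlm_bl_thread_main() below are the consumer half.  A worker
 * sleeps in l_wait_event_exclusive() until a blwi is queued here, runs
 * ldlm_handle_bl_callback() on it, and frees it.  A work item with
 * blwi_ns == NULL is the shutdown "poison pill" queued by
 * ldlm_cleanup().
 */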
static int ldlm_msg_check_version(struct lustre_msg *msg)
{
        int rc;

        switch (msg->opc) {
        case LDLM_ENQUEUE:
        case LDLM_CONVERT:
        case LDLM_CANCEL:
        case LDLM_BL_CALLBACK:
        case LDLM_CP_CALLBACK:
        case LDLM_GL_CALLBACK:
                rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               msg->opc, msg->version, LUSTRE_DLM_VERSION);
                break;
        case OBD_LOG_CANCEL:
        case LLOG_ORIGIN_HANDLE_OPEN:
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
        case LLOG_ORIGIN_HANDLE_CLOSE:
        case LLOG_CATINFO:
                rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               msg->opc, msg->version, LUSTRE_LOG_VERSION);
                break;
        default:
                CERROR("LDLM unknown opcode %d\n", msg->opc);
                rc = -ENOTSUPP;
                break;
        }

        return rc;
}
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        rc = ldlm_msg_check_version(req->rq_reqmsg);
        if (rc) {
                CERROR("LDLM_CB drop mal-formed request\n");
                RETURN(rc);
        }

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from nid %s with bad "
                       "export cookie "LPX64" (ptl req %d/rep %d); this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc,
                       ptlrpc_peernid2str(&req->rq_peer, str),
                       req->rq_reqmsg->handle.cookie,
                       req->rq_request_portal, req->rq_reply_portal);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
                break;
        case LDLM_CP_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
                break;
        case LDLM_GL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
                break;
        case OBD_LOG_CANCEL:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
                rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_OPEN:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_open(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_prev_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("unknown opcode %u\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("can't unpack dlm_req\n");
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                LDLM_DEBUG_NOLOCK("callback on lock "LPX64" - lock "
                                  "disappeared\n", dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
#ifdef __KERNEL__
                rc = ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock);
                ldlm_callback_reply(req, rc);
#else
                rc = 0;
                ldlm_callback_reply(req, rc);
                ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
#endif
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG();         /* checked above */
        }

        RETURN(0);
}
static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        rc = ldlm_msg_check_version(req->rq_reqmsg);
        if (rc) {
                CERROR("LDLM_CL drop mal-formed request\n");
                RETURN(rc);
        }

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
                       req->rq_reqmsg->opc, req->rq_request_portal,
                       req->rq_reply_portal);
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                break;
        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EINVAL);
        }

        RETURN(0);
}
#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}
struct ldlm_bl_thread_data {
        int bltd_num;
        struct ldlm_bl_pool *bltd_blp;
};

static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}
#endif
static int ldlm_setup(void);
static int ldlm_cleanup(int force);
int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        EXIT;
}
static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm_state != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)
                RETURN(-ENOMEM);

        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);

        ldlm_state->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
                                ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);
        if (!ldlm_state->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm_state->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);
        if (!ldlm_state->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc)
                GOTO(out_thread, rc);

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc)
                GOTO(out_thread, rc);

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#endif

 out_proc:
        ldlm_proc_cleanup();
 out_free:
        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;
        RETURN(rc);
}
static int ldlm_cleanup(int force)
{
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces();
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);
#endif

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}
int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}
void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        if (kmem_cache_destroy(ldlm_resource_slab) != 0)
                CERROR("couldn't free ldlm resource slab\n");
        if (kmem_cache_destroy(ldlm_lock_slab) != 0)
                CERROR("couldn't free ldlm lock slab\n");
}
/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

#if 0
/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);
#endif

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_start_recovery_thread);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);