/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

# define EXPORT_SYMTAB
#define DEBUG_SUBSYSTEM S_LDLM

# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>

# include <liblustre.h>

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <portals/list.h>
#include "ldlm_internal.h"
extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;
extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);

static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount = 0;

static struct ldlm_state *ldlm_state;
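
/* Round a jiffies value up to the next whole second: e.g. with HZ == 100 a
 * timeout of 250 jiffies becomes 300.  Per the ldlm_add_waiting_lock() comment
 * below, rounding keeps the waiting-locks timer from firing more than about
 * once a second under heavy lock traffic. */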
inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;

static struct expired_lock_thread {
        wait_queue_head_t elt_waitq;
        struct list_head elt_expired_locks;
} expired_lock_thread;

#define ELT_TERMINATE 2
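
/* elt_state life cycle: ELT_STOPPED until the expired-lock thread starts,
 * ELT_READY once expired_lock_main() is running, and ELT_TERMINATE when
 * ldlm_cleanup() asks the thread to exit. */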
        struct list_head blp_list;
        wait_queue_head_t blp_waitq;
        atomic_t blp_num_threads;
        struct completion blp_comp;
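
/* Work handed to the "ldlm_bl" thread pool: ldlm_bl_to_thread() queues a
 * ldlm_bl_work_item on blp_list and wakes blp_waitq; a pool thread then runs
 * ldlm_handle_bl_callback() on it, so the callback service thread itself never
 * executes the blocking AST. */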
struct ldlm_bl_work_item {
        struct list_head blwi_entry;
        struct ldlm_namespace *blwi_ns;
        struct ldlm_lock_desc blwi_ld;
        struct ldlm_lock *blwi_lock;
static inline int have_expired_locks(void)
        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);
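
/* Dedicated thread ("ldlm_elt"): sleeps until waiting_locks_callback() moves
 * timed-out locks onto elt_expired_locks, then evicts the exports that hold
 * them via ptlrpc_fail_export().  Exits when elt_state becomes ELT_TERMINATE. */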
static int expired_lock_main(void *arg)
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };

        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        SIGNAL_MASK_UNLOCK(current, flags);

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = list_entry(expired->next, struct ldlm_lock,
                        if ((void *)lock < LP_POISON + PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                CERROR("free lock on elt list %p\n", lock);

                        list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export\n");

                        export = class_export_get(lock->l_export);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(export);
                        class_export_put(export);
                        spin_lock_bh(&expired_lock_thread.elt_lock);
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
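
/* Timer handler for waiting_locks_timer: moves locks whose callback timeout
 * has passed onto the expired-lock thread's list (group locks are left alone),
 * wakes that thread, and re-arms the timer for the next pending lock. */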
static void waiting_locks_callback(unsigned long unused)
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,

                if ((lock->l_callback_timeout > jiffies) ||
                    (lock->l_req_mode == LCK_GROUP))

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid "LPX64" (%s) ",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           lock->l_export->exp_connection->c_peer.peer_nid,
                           portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
                                           lock->l_export->exp_connection->c_peer.peer_nid,

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);

        /*
         * Make sure the timer will fire again if we have any locks
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        spin_unlock_bh(&waiting_locks_spinlock);
/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately. (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);

        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident. This adjusts the lock-timeout timer if
 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)

int ldlm_del_waiting_lock(struct ldlm_lock *lock)

#endif /* __KERNEL__ */
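
/* A client failed to respond to an AST: log which client and resource were
 * involved, then evict the offending export. */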
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, const char *ast_type)
        const struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
               ", mode %s: evicting client %s@%s NID "LPX64" (%s)\n",
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1],
               ldlm_lockname[lock->l_granted_mode],
               lock->l_export->exp_client_uuid.uuid,
               conn->c_remote_uuid.uuid, conn->c_peer.peer_nid,
               portals_nid2str(conn->c_peer.peer_ni->pni_number,
                               conn->c_peer.peer_nid, str));
        ptlrpc_fail_export(lock->l_export);
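
/* Interpret the result of an AST RPC: timeouts and dropped connections evict
 * the client (liblustre clients just get the lock cancelled), while other
 * errors cancel the lock and, per the comment below, make the caller restart
 * its reprocessing. */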
static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid "
                                   LPU64") timeout, just cancelling lock",
                                   ast_type, req->rq_peer.peer_nid);
                        ldlm_lock_cancel(lock);

                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, ast_type);

                LDLM_DEBUG(lock, "client (nid "LPU64") returned %d"
                           " from %s AST - normal race",
                           req->rq_peer.peer_nid,
                           req->rq_repmsg->status, ast_type);

                LDLM_ERROR(lock, "client (nid "LPU64") returned %d "
                           "from %s AST", req->rq_peer.peer_nid,
                           (req->rq_repmsg != NULL) ?
                           req->rq_repmsg->status : 0, ast_type);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
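
/* Send a blocking AST to the client holding this lock, over the export's
 * reverse import.  Granted locks are also put on the waiting-locks list so the
 * client is evicted if it never cancels. */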
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30) {
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_BL_CALLBACK, 1, &size, NULL);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
                rc = ldlm_handle_ast_error(lock, req, rc, "blocking");

        ptlrpc_req_finished(req);
/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
               (large->tv_usec - small->tv_usec);
}
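
/* Tell a client that its enqueue has been granted.  timeval_sub() above yields
 * microseconds, hence the division by 1000000 before comparing the enqueue
 * wait against obd_timeout (seconds). */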
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size[2] = {sizeof(*body)}, buffers = 1;

        LASSERT(lock != NULL);

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        if (lock->l_resource->lr_lvb_len) {
                size[1] = lock->l_resource->lr_lvb_len;

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_CP_CALLBACK, buffers, size, NULL);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

                void *lvb = lustre_msg_buf(req->rq_reqmsg, 1,
                                           lock->l_resource->lr_lvb_len);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
                rc = ldlm_handle_ast_error(lock, req, rc, "completion");

        ptlrpc_req_finished(req);
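
/* Ask the client holding this lock for its current lock value block (LVB) and
 * fold the reply into the resource via lvbo_update(). */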
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);

        LASSERT(lock != NULL);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_GL_CALLBACK, 1, &size, NULL);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        ldlm_lock2desc(lock, &body->lock_desc);

        size = lock->l_resource->lr_lvb_len;
        req->rq_replen = lustre_msg_size(1, &size);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        rc = ptlrpc_queue_wait(req);
        if (rc == -ELDLM_NO_LOCK_DATA)
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
                rc = res->lr_namespace->ns_lvbo->lvbo_update
                        (res, req->rq_repmsg, 0, 1);
        ptlrpc_req_finished(req);
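
/* Server-side enqueue handler: unpack the request, create the lock, enqueue it
 * in the local namespace via ldlm_lock_enqueue(), and pack the resulting
 * flags, handle and (if present) LVB into the reply. */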
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc = 0, size[2] = {sizeof(*dlm_rep)};
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");
                GOTO(out, rc = -EFAULT);

        flags = dlm_req->lock_flags;

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace, &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback,
                                glimpse_callback, NULL, 0);
                GOTO(out, rc = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        lock->l_export = class_export_get(req->rq_export);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                if (lock->l_resource->lr_lvb_len) {
                        size[1] = lock->l_resource->lr_lvb_len;

                if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = lustre_pack_reply(req, buffers, size, NULL);

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(ldlm_policy_data_t));
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                memcpy(&lock->l_req_extent, &lock->l_policy_data.l_extent,
                       sizeof(lock->l_req_extent));

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, &flags);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_status = err;
        if (req->rq_reply_state == NULL) {
                err = lustre_pack_reply(req, 0, NULL, NULL);

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all. If this moves, revisit that code. -phil */
        LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                   "(err=%d, rc=%d)", err, rc);

        if (lock->l_resource->lr_lvb_len > 0 && rc == 0) {
                void *lvb = lustre_msg_buf(req->rq_repmsg, 1,
                                           lock->l_resource->lr_lvb_len);
                LASSERT(lvb != NULL);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);

        if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
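
/* Server-side convert handler: look the lock up by handle, convert it to the
 * requested mode and reprocess its resource. */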
int ldlm_handle_convert(struct ptlrpc_request *req)
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");

        rc = lustre_pack_reply(req, 1, &size, NULL);
                CERROR("out of memory\n");

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
                req->rq_status = EINVAL;

                LDLM_DEBUG(lock, "server-side convert handler START");
                ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                  &dlm_rep->lock_flags);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);

                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");

                LDLM_DEBUG_NOLOCK("server-side convert handler END");
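
/* Server-side cancel handler: unknown cookies are answered with ESTALE;
 * otherwise refresh the LVB if the namespace provides lvbo_update(), cancel
 * the lock and reprocess its resource. */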
int ldlm_handle_cancel(struct ptlrpc_request *req)
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        char str[PTL_NALFMT_SIZE];

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");

        rc = lustre_pack_reply(req, 0, NULL, NULL);
                CERROR("out of memory\n");

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from client %s nid "LPX64" (%s)\n",
                       dlm_req->lock_handle1.cookie,
                       req->rq_export->exp_client_uuid.uuid,
                       req->rq_peer.peer_nid,
                       portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                       req->rq_peer.peer_nid, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;

                LDLM_DEBUG(lock, "server-side cancel handler START");
                res = lock->l_resource;
                if (res && res->lr_namespace->ns_lvbo &&
                    res->lr_namespace->ns_lvbo->lvbo_update) {
                        (void)res->lr_namespace->ns_lvbo->lvbo_update
                        //(res, req->rq_reqmsg, 1);

                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);

        if (ptlrpc_reply(req) != 0)

                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side cancel handler END");
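
/* Client side: a blocking AST arrived from the server.  Mark the lock
 * CBPENDING and, if it is no longer in use, run its blocking AST callback. */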
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                        l_lock(&ns->ns_lock);

                LDLM_DEBUG(lock, "Lock still has references, will be"

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
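
/* Client side: the server granted our lock.  Adopt the mode, policy data and
 * resource the server describes, copy in any LVB, then grant the lock locally
 * and run the resulting AST work. */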
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));
                LDLM_DEBUG(lock, "completion AST, new policy data");

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                          dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");

        if (lock->l_lvb_len) {
                lvb = lustre_swab_reqbuf(req, 1, lock->l_lvb_len,
                                         lock->l_lvb_swabber);
                        LDLM_ERROR(lock, "completion AST did not contain "
                memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
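
/* Client side: a glimpse AST arrived.  Run the lock's glimpse callback to
 * report our LVB, and cancel the lock if it is an idle, unused PW lock. */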
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL) {
                l_unlock(&ns->ns_lock);
                l_check_no_ns_lock(ns);
                rc = lock->l_glimpse_ast(lock, req);
                l_lock(&ns->ns_lock);

        if (req->rq_repmsg != NULL) {

        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            time_after(jiffies, lock->l_last_used + 10 * HZ)) {
                l_unlock(&ns->ns_lock);
                ldlm_handle_bl_callback(ns, NULL, lock);

        l_unlock(&ns->ns_lock);
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
        if (req->rq_reply_state == NULL) {
                rc = lustre_pack_reply(req, 0, NULL, NULL);
        return ptlrpc_reply(req);
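
/* Queue a blocking AST for the ldlm_bl thread pool rather than handling it in
 * the callback service thread. */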
int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                      struct ldlm_lock *lock)
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;

        OBD_ALLOC(blwi, sizeof(*blwi));

        blwi->blwi_lock = lock;
        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);
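
/* Request handler for the LDLM callback service (LDLM_CB_REQUEST_PORTAL):
 * dispatches blocking, completion and glimpse ASTs sent to this node as a
 * client, plus a handful of llog origin requests. */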
static int ldlm_callback_handler(struct ptlrpc_request *req)
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        /* Requests arrive in sender's byte order. The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from nid "LPX64" (%s) with bad "
                       "export cookie "LPX64" (ptl req %d/rep %d); this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc, req->rq_peer.peer_nid,
                       portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                       req->rq_peer.peer_nid, str),
                       req->rq_reqmsg->handle.cookie,
                       req->rq_request_portal, req->rq_reply_portal);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                             lustre_swab_ldlm_request);
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch(req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
        case LDLM_CP_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
        case LDLM_GL_CALLBACK:
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
        case OBD_LOG_CANCEL:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
                rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
        case LLOG_ORIGIN_HANDLE_CREATE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_create(req);
                ldlm_callback_reply(req, rc);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                CERROR("unknown opcode %u\n", req->rq_reqmsg->opc);
                ldlm_callback_reply(req, -EPROTO);

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("can't unpack dlm_req\n");
                ldlm_callback_reply (req, -EPROTO);

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
                CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
                       dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
                rc = ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock);
                ldlm_callback_reply(req, rc);
                ldlm_callback_reply(req, rc);
                ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                LBUG(); /* checked above */
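
/* Request handler for the LDLM cancel service (LDLM_CANCEL_REQUEST_PORTAL):
 * lock cancel requests from clients end up here. */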
static int ldlm_cancel_handler(struct ptlrpc_request *req)
        /* Requests arrive in sender's byte order. The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
                       req->rq_reqmsg->opc, req->rq_request_portal,
                       req->rq_reply_portal);
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);

                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
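
/* Pop the next queued work item off the blocking-callback pool, if any. */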
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                list_del(&blwi->blwi_entry);
        spin_unlock(&blp->blp_lock);

struct ldlm_bl_thread_data {
        struct ldlm_bl_pool *bltd_blp;
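
/* Main loop of an "ldlm_bl_%02d" pool thread: wait for work items, run the
 * blocking callback for each, and exit when a work item with a NULL namespace
 * arrives (queued by ldlm_cleanup() to shut the pool down). */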
static int ldlm_bl_thread_main(void *arg)
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;

        /* XXX boiler-plate */
        char name[sizeof(current->comm)];
        snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
        kportal_daemonize(name);

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,

                if (blwi->blwi_ns == NULL)

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                OBD_FREE(blwi, sizeof(*blwi));

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
static int ldlm_setup(void);
static int ldlm_cleanup(int force);

int ldlm_get_ref(void)
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {

void ldlm_put_ref(int force)
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                        CERROR("ldlm_cleanup failed: %d\n", rc);
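
/* Bring the LDLM up: /proc entries, the callback and cancel ptlrpc services,
 * the blocking-callback thread pool, the expired-lock thread and the
 * waiting-locks timer.  Intended to run once, when ldlm_get_ref() takes the
 * first reference. */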
static int ldlm_setup(void)
        struct ldlm_bl_pool *blp;

        if (ldlm_state != NULL)

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)

        rc = ldlm_proc_setup();

        ldlm_state->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
                                ldlm_callback_handler, "ldlm_cbd",
        if (!ldlm_state->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);

        ldlm_state->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
                                LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL,
                                ldlm_cancel_handler, "ldlm_canceld",
        if (!ldlm_state->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);

        OBD_ALLOC(blp, sizeof(*blp));
                GOTO(out_proc, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {

                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        GOTO(out_thread, rc);

                wait_for_completion(&blp->blp_comp);

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
                GOTO(out_thread, rc);

        rc = ptlrpc_start_n_threads(NULL, ldlm_state->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
                GOTO(out_thread, rc);

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);

        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);

        ldlm_proc_cleanup();

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
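
/* Undo ldlm_setup(): refuse to run while namespaces still exist, stop the
 * blocking-callback threads by feeding them NULL-namespace work items, then
 * shut down the services and the expired-lock thread. */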
static int ldlm_cleanup(int force)
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces();

        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);

        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm_state->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm_state->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
int __init ldlm_init(void)
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);

        l_lock_init(&ldlm_handle_lock);

void __exit ldlm_exit(void)
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        if (kmem_cache_destroy(ldlm_resource_slab) != 0)
                CERROR("couldn't free ldlm resource slab\n");
        if (kmem_cache_destroy(ldlm_lock_slab) != 0)
                CERROR("couldn't free ldlm lock slab\n");
EXPORT_SYMBOL(ldlm_flock_completion_ast);

EXPORT_SYMBOL(ldlm_extent_shift_kms);

EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);

EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_abort_recovery);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);