/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lockd.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_class.h>
#include <libcfs/list.h>
#include "ldlm_internal.h"
static int ldlm_num_threads;
CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
                "number of DLM service threads to start");

extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
static cfs_mutex_t      ldlm_ref_mutex;
static int ldlm_refcount;

struct ldlm_cb_async_args {
        struct ldlm_cb_set_arg *ca_set_arg;
        struct ldlm_lock       *ca_lock;
};

static struct ldlm_state *ldlm_state;
inline cfs_time_t round_timeout(cfs_time_t timeout)
{
        return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}

/* timeout for initial callback (AST) reply (bz10399) */
static inline unsigned int ldlm_get_rq_timeout(void)
{
        unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);

        return timeout < 1 ? 1 : timeout;
}
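
/*
 * Worked example (added illustration, not from the original source): with,
 * say, obd_timeout = 100s and ldlm_timeout = 20s, the AST reply timeout
 * above is min(20, 100 / 3) = 20 seconds; the clamp to a minimum of one
 * second only matters when both tunables are configured very low.
 */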
#ifdef __KERNEL__
/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
static cfs_list_t waiting_locks_list;
static cfs_timer_t waiting_locks_timer;

static struct expired_lock_thread {
        cfs_waitq_t             elt_waitq;
        int                     elt_state;
        int                     elt_dump;
        cfs_list_t              elt_expired_locks;
} expired_lock_thread;

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2
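
/*
 * Note (summary of the state machine used below): the expired-lock thread
 * starts in ELT_STOPPED, advertises ELT_READY from expired_lock_main() once
 * it is daemonized, loops until the shutdown path sets ELT_TERMINATE and
 * wakes elt_waitq, and re-enters ELT_STOPPED just before it exits.
 */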
struct ldlm_bl_pool {
        cfs_spinlock_t          blp_lock;

        /*
         * blp_prio_list is used for callbacks that should be handled
         * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
         */
        cfs_list_t              blp_prio_list;

        /*
         * blp_list is used for all other callbacks which are likely
         * to take longer to process.
         */
        cfs_list_t              blp_list;

        cfs_waitq_t             blp_waitq;
        cfs_completion_t        blp_comp;
        cfs_atomic_t            blp_num_threads;
        cfs_atomic_t            blp_busy_threads;
};
struct ldlm_bl_work_item {
        cfs_list_t              blwi_entry;
        struct ldlm_namespace  *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock       *blwi_lock;
        cfs_list_t              blwi_head;
        int                     blwi_count;
        cfs_completion_t        blwi_comp;
        int                     blwi_mode;
        int                     blwi_mem_pressure;
};
static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        RETURN(need_to_run);
}
static int expired_lock_main(void *arg)
{
        cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        int do_dump;
        ENTRY;

        cfs_daemonize("ldlm_elt");

        expired_lock_thread.elt_state = ELT_READY;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                cfs_spin_lock_bh(&waiting_locks_spinlock);
                if (expired_lock_thread.elt_dump) {
                        struct libcfs_debug_msg_data msgdata = {
                                .msg_file = __FILE__,
                                .msg_fn = "waiting_locks_callback",
                                .msg_line = expired_lock_thread.elt_dump };
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        /* from waiting_locks_callback, but not in timer */
                        libcfs_debug_dumplog();
                        libcfs_run_lbug_upcall(&msgdata);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                        expired_lock_thread.elt_dump = 0;
                }

                do_dump = 0;

                while (!cfs_list_empty(expired)) {
                        struct obd_export *export;
                        struct ldlm_lock *lock;

                        lock = cfs_list_entry(expired->next, struct ldlm_lock,
                                              l_pending_chain);
                        if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                                CERROR("free lock on elt list %p\n", lock);
                                LBUG();
                        }
                        cfs_list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
                            (void *)lock->l_export >= LP_POISON) {
                                CERROR("lock with free export on elt list %p\n",
                                       lock->l_export);
                                lock->l_export = NULL;
                                LDLM_ERROR(lock, "free export");
                                /* release extra ref grabbed by
                                 * ldlm_add_waiting_lock() or
                                 * ldlm_failed_ast() */
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }
                        export = class_export_lock_get(lock->l_export, lock);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);

                        do_dump++;
                        class_fail_export(export);
                        class_export_lock_put(export, lock);

                        /* release extra ref grabbed by ldlm_add_waiting_lock()
                         * or ldlm_failed_ast() */
                        LDLM_LOCK_RELEASE(lock);

                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                }
                cfs_spin_unlock_bh(&waiting_locks_spinlock);

                if (do_dump && obd_dump_on_eviction) {
                        CERROR("dump the log upon eviction\n");
                        libcfs_debug_dumplog();
                }

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        RETURN(0);
}
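
/*
 * Hand-off sketch (added note, names as in this file): waiting_locks_callback()
 * below runs in timer (BH) context and therefore may not sleep, so it only
 * moves expired locks onto the expired list and wakes this thread, which then
 * performs the blocking work (class_fail_export(), log dumping):
 *
 *      cfs_list_add(&lock->l_pending_chain,
 *                   &expired_lock_thread.elt_expired_locks);
 *      cfs_waitq_signal(&expired_lock_thread.elt_waitq);
 */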
static int ldlm_add_waiting_lock(struct ldlm_lock *lock);

/**
 * Check if there is a request in the export request list
 * which prevents the lock canceling.
 */
static int ldlm_lock_busy(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;
        int match = 0;
        ENTRY;

        if (lock->l_export == NULL)
                RETURN(0);

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                if (req->rq_ops->hpreq_lock_match) {
                        match = req->rq_ops->hpreq_lock_match(req, lock);
                        if (match)
                                break;
                }
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
        RETURN(match);
}
/* This is called from within a timer interrupt and cannot schedule */
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;

repeat:
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        while (!cfs_list_empty(&waiting_locks_list)) {
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                if (cfs_time_after(lock->l_callback_timeout,
                                   cfs_time_current()) ||
                    (lock->l_req_mode == LCK_GROUP))
                        break;

                if (ptlrpc_check_suspend()) {
                        /* There is a case when we talk to one MDS while
                         * holding a lock from another MDS; we can easily get
                         * here if the second MDS is being recovered, so we
                         * suspend timeouts. bug 6019 */

                        LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        ldlm_add_waiting_lock(lock);
                        goto repeat;
                }

                /* if timeout overlaps the activation time of suspended timeouts
                 * then extend it to give a chance for client to reconnect */
                if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
                                                 cfs_time_seconds(obd_timeout)/2),
                                    ptlrpc_suspend_wakeup_time())) {
                        LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
                                   lock->l_export->exp_client_uuid.uuid,
                                   lock->l_export->exp_connection->c_remote_uuid.uuid,
                                   libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));

                        cfs_list_del_init(&lock->l_pending_chain);
                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        ldlm_add_waiting_lock(lock);
                        goto repeat;
                }

                /* Check if we need to prolong timeout */
                if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
                    ldlm_lock_busy(lock)) {
                        int cont = 1;

                        if (lock->l_pending_chain.next == &waiting_locks_list)
                                cont = 0;

                        LDLM_LOCK_GET(lock);

                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                        LDLM_DEBUG(lock, "prolong the busy lock");
                        ldlm_refresh_waiting_lock(lock,
                                                  ldlm_get_enq_timeout(lock));
                        cfs_spin_lock_bh(&waiting_locks_spinlock);

                        if (!cont) {
                                LDLM_LOCK_RELEASE(lock);
                                break;
                        }

                        LDLM_LOCK_RELEASE(lock);
                        continue;
                }
                ldlm_lock_to_ns(lock)->ns_timeouts++;
                LDLM_ERROR(lock, "lock callback timer expired after %lds: "
                           "evicting client at %s ",
                           cfs_time_current_sec() - lock->l_last_activity,
                           libcfs_nid2str(
                                   lock->l_export->exp_connection->c_peer.nid));

                /* no need to take an extra ref on the lock since it was in
                 * the waiting_locks_list and ldlm_add_waiting_lock()
                 * already grabbed a ref */
                cfs_list_del(&lock->l_pending_chain);
                cfs_list_add(&lock->l_pending_chain,
                             &expired_lock_thread.elt_expired_locks);
        }

        if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
                if (obd_dump_on_timeout)
                        expired_lock_thread.elt_dump = __LINE__;

                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!cfs_list_empty(&waiting_locks_list)) {
                cfs_time_t timeout_rounded;
                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                      l_pending_chain);
                timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
}
/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 * As done in ldlm_add_waiting_lock(), the caller must take a lock reference
 * when the lock is added to the waiting list (1 is returned).
 *
 * Called with the namespace lock held.
 */
static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
{
        cfs_time_t timeout;
        cfs_time_t timeout_rounded;

        if (!cfs_list_empty(&lock->l_pending_chain))
                return 0;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
            OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
                seconds = 1;

        timeout = cfs_time_shift(seconds);
        if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
                lock->l_callback_timeout = timeout;

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (cfs_time_before(timeout_rounded,
                            cfs_timer_deadline(&waiting_locks_timer)) ||
            !cfs_timer_is_armed(&waiting_locks_timer)) {
                cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
        }
        /* if the new lock has a shorter timeout than something earlier on
           the list, we'll wait the longer amount of time; no big deal. */
        /* FIFO */
        cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
        return 1;
}
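
/*
 * Usage sketch (added note, mirroring ldlm_add_waiting_lock() below): callers
 * serialize on waiting_locks_spinlock and take a lock reference only when the
 * lock was actually queued:
 *
 *      cfs_spin_lock_bh(&waiting_locks_spinlock);
 *      if (__ldlm_add_waiting_lock(lock, seconds))
 *              LDLM_LOCK_GET(lock);
 *      cfs_spin_unlock_bh(&waiting_locks_spinlock);
 */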
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        int ret;
        int timeout = ldlm_get_enq_timeout(lock);

        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (lock->l_destroyed) {
                static cfs_time_t next;
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
                if (cfs_time_after(cfs_time_current(), next)) {
                        next = cfs_time_shift(14400);
                        libcfs_debug_dumpstack(NULL);
                }
                return 0;
        }

        ret = __ldlm_add_waiting_lock(lock, timeout);
        if (ret) {
                /* grab ref on the lock if it has been added to the
                 * waiting list */
                LDLM_LOCK_GET(lock);
        }
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        if (ret) {
                cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
                if (cfs_list_empty(&lock->l_exp_list))
                        cfs_list_add(&lock->l_exp_list,
                                     &lock->l_export->exp_bl_list);
                cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
        }

        LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
                   ret == 0 ? "not re-" : "", timeout,
                   AT_OFF ? "off" : "on");
        return ret;
}
/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 * As done in ldlm_del_waiting_lock(), the caller must release the lock
 * reference when the lock is removed from any list (1 is returned).
 *
 * Called with namespace lock held.
 */
static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        cfs_list_t *list_next;

        if (cfs_list_empty(&lock->l_pending_chain))
                return 0;

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        cfs_timer_disarm(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = cfs_list_entry(list_next, struct ldlm_lock,
                                              l_pending_chain);
                        cfs_timer_arm(&waiting_locks_timer,
                                      round_timeout(next->l_callback_timeout));
                }
        }
        cfs_list_del_init(&lock->l_pending_chain);

        return 1;
}
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        int ret;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                CDEBUG(D_DLMTRACE, "Client lock %p : no-op\n", lock);
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);
        ret = __ldlm_del_waiting_lock(lock);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        /* remove the lock out of export blocking list */
        cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
        cfs_list_del_init(&lock->l_exp_list);
        cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);

        if (ret) {
                /* release lock ref if it has indeed been removed
                 * from a list */
                LDLM_LOCK_RELEASE(lock);
        }

        LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
        return ret;
}
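
/*
 * Note: ldlm_del_waiting_lock() is the tear-down counterpart of
 * ldlm_add_waiting_lock() above; the LDLM_LOCK_RELEASE() here drops exactly
 * the reference the add path took (ret == 1 is the only case in which the
 * add path took one), so add/del calls must stay balanced or the lock leaks.
 */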
/*
 * Called with namespace lock held.
 */
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        cfs_spin_lock_bh(&waiting_locks_spinlock);

        if (cfs_list_empty(&lock->l_pending_chain)) {
                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        /* we remove/add the lock to the waiting list, so no need to
         * release/take a lock reference */
        __ldlm_del_waiting_lock(lock);
        __ldlm_add_waiting_lock(lock, timeout);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);

        LDLM_DEBUG(lock, "refreshed");
        return 1;
}
#else /* !__KERNEL__ */

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
        RETURN(0);
}

#endif /* __KERNEL__ */
#ifdef HAVE_SERVER_SUPPORT
# ifndef __KERNEL__
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
        RETURN(1);
}
# endif /* !__KERNEL__ */
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                            const char *ast_type)
{
        LCONSOLE_ERROR_MSG(0x138, "%s: A client on nid %s was evicted due "
                           "to a lock %s callback time out: rc %d\n",
                           lock->l_export->exp_obd->obd_name,
                           obd_export_nid2str(lock->l_export), ast_type, rc);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();
#ifdef __KERNEL__
        cfs_spin_lock_bh(&waiting_locks_spinlock);
        if (__ldlm_del_waiting_lock(lock) == 0)
                /* the lock was not in any list, grab an extra ref before adding
                 * the lock to the expired list */
                LDLM_LOCK_GET(lock);
        cfs_list_add(&lock->l_pending_chain,
                     &expired_lock_thread.elt_expired_locks);
        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
        cfs_spin_unlock_bh(&waiting_locks_spinlock);
#else
        class_fail_export(lock->l_export);
#endif
}
static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                 struct ptlrpc_request *req, int rc,
                                 const char *ast_type)
{
        lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;

        if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
                LASSERT(lock->l_export);
                if (lock->l_export->exp_libclient) {
                        LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
                                   " timeout, just cancelling lock", ast_type,
                                   libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else if (lock->l_flags & LDLM_FL_CANCEL) {
                        LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
                                   "cancel was received (AST reply lost?)",
                                   ast_type, libcfs_nid2str(peer.nid));
                        ldlm_lock_cancel(lock);
                        rc = -ERESTART;
                } else {
                        ldlm_del_waiting_lock(lock);
                        ldlm_failed_ast(lock, rc, ast_type);
                }
        } else if (rc) {
                if (rc == -EINVAL) {
                        struct ldlm_resource *res = lock->l_resource;
                        LDLM_DEBUG(lock, "client (nid %s) returned %d"
                                   " from %s AST - normal race",
                                   libcfs_nid2str(peer.nid),
                                   req->rq_repmsg ?
                                   lustre_msg_get_status(req->rq_repmsg) : -1,
                                   ast_type);
                        if (res) {
                                /* update lvbo to return proper attributes.
                                 * see bug 23174 */
                                ldlm_resource_getref(res);
                                ldlm_res_lvbo_update(res, NULL, 1);
                                ldlm_resource_putref(res);
                        }
                } else {
                        LDLM_ERROR(lock, "client (nid %s) returned %d "
                                   "from %s AST", libcfs_nid2str(peer.nid),
                                   (req->rq_repmsg != NULL) ?
                                   lustre_msg_get_status(req->rq_repmsg) : 0,
                                   ast_type);
                }
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        return rc;
}
static int ldlm_cb_interpret(const struct lu_env *env,
                             struct ptlrpc_request *req, void *data, int rc)
{
        struct ldlm_cb_async_args *ca   = data;
        struct ldlm_lock          *lock = ca->ca_lock;
        struct ldlm_cb_set_arg    *arg  = ca->ca_set_arg;
        ENTRY;

        LASSERT(lock != NULL);
        if (rc != 0) {
                rc = ldlm_handle_ast_error(lock, req, rc,
                                           arg->type == LDLM_BL_CALLBACK
                                           ? "blocking" : "completion");
                if (rc == -ERESTART)
                        cfs_atomic_inc(&arg->restart);
        }
        LDLM_LOCK_RELEASE(lock);

        RETURN(0);
}
static inline int ldlm_bl_and_cp_ast_tail(struct ptlrpc_request *req,
                                          struct ldlm_cb_set_arg *arg,
                                          struct ldlm_lock *lock,
                                          int instant_cancel)
{
        int rc = 0;
        ENTRY;

        if (unlikely(instant_cancel)) {
                rc = ptl_send_rpc(req, 1);
                ptlrpc_req_finished(req);
                if (rc == 0)
                        cfs_atomic_inc(&arg->restart);
        } else {
                LDLM_LOCK_GET(lock);
                ptlrpc_set_add_req(arg->set, req);
        }

        RETURN(rc);
}
/**
 * Check if there are requests in the export request list which prevent
 * the lock canceling and make these requests high priority ones.
 */
static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
{
        struct ptlrpc_request *req;

        if (lock->l_export == NULL) {
                LDLM_DEBUG(lock, "client lock: no-op");
                return;
        }

        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                rq_exp_list) {
                /* Do not process requests that were not yet added to their
                 * incoming queue or were already removed from there for
                 * processing */
                if (!req->rq_hp && !cfs_list_empty(&req->rq_list) &&
                    req->rq_ops->hpreq_lock_match &&
                    req->rq_ops->hpreq_lock_match(req, lock))
                        ptlrpc_hpreq_reorder(req);
        }
        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
}
/*
 * ->l_blocking_ast() method for server-side locks. This is invoked when a
 * newly enqueued server lock conflicts with the given one.
 *
 * Sends a blocking AST RPC to the client owning that lock; arms the timeout
 * timer to wait for the client response.
 */
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_cb_async_args *ca;
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        int                     instant_cancel = 0;
        int                     rc = 0;
        ENTRY;

        if (flag == LDLM_CB_CANCELING)
                /* Don't need to do anything here. */
                RETURN(0);

        LASSERT(lock != NULL);
        LASSERT(data != NULL);
        if (lock->l_export->exp_obd->obd_recovering != 0)
                LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");

        ldlm_lock_reorder_req(lock);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_BL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;

        lock_res(lock->l_resource);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                unlock_res(lock->l_resource);
                ptlrpc_req_finished(req);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                unlock_res(lock->l_resource);
                ptlrpc_req_finished(req);
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
                instant_cancel = 1;

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_desc = *desc;
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");

        ptlrpc_request_set_replen(req);
        if (instant_cancel) {
                unlock_res(lock->l_resource);
                ldlm_lock_cancel(lock);
        } else {
                LASSERT(lock->l_granted_mode == lock->l_req_mode);
                ldlm_add_waiting_lock(lock);
                unlock_res(lock->l_resource);
        }

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_BL_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_bl_and_cp_ast_tail(req, arg, lock, instant_cancel);

        RETURN(rc);
}
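
/*
 * Note on the two exits above: with LDLM_FL_CANCEL_ON_BLOCK (instant_cancel)
 * the lock is cancelled locally and the AST is fired without waiting for a
 * reply, so no callback timer is armed; otherwise the lock goes on the
 * waiting list first, and an unanswered AST eventually trips
 * waiting_locks_callback() and evicts the client.
 */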
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_cb_set_arg *arg = data;
        struct ldlm_request    *body;
        struct ptlrpc_request  *req;
        struct ldlm_cb_async_args *ca;
        long                    total_enqueue_wait;
        int                     instant_cancel = 0;
        int                     rc = 0;
        ENTRY;

        LASSERT(lock != NULL);
        LASSERT(data != NULL);

        total_enqueue_wait = cfs_time_sub(cfs_time_current_sec(),
                                          lock->l_last_activity);

        req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
                                   &RQF_LDLM_CP_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* server namespace, doesn't need lock */
        if (lock->l_resource->lr_lvb_len) {
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
                                     lock->l_resource->lr_lvb_len);
        }

        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
        ca = ptlrpc_req_async_args(req);
        ca->ca_set_arg = arg;
        ca->ca_lock = lock;

        req->rq_interpret_reply = ldlm_cb_interpret;
        req->rq_no_resend = 1;
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);

        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);
        if (lock->l_resource->lr_lvb_len) {
                void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);

                lock_res(lock->l_resource);
                memcpy(lvb, lock->l_resource->lr_lvb_data,
                       lock->l_resource->lr_lvb_len);
                unlock_res(lock->l_resource);
        }

        LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
                   total_enqueue_wait);

        /* Server-side enqueue wait time estimate, used in
           __ldlm_add_waiting_lock to set future enqueue timers */
        if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
                at_measured(ldlm_lock_to_ns_at(lock),
                            total_enqueue_wait);
        else
                /* bz18618. Don't add lock enqueue time we spend waiting for a
                   previous callback to fail. Locks waiting legitimately will
                   get extended by ldlm_refresh_waiting_lock regardless of the
                   estimate, so it's okay to underestimate here. */
                LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
                           "It is likely that a previous callback timed out.",
                           total_enqueue_wait,
                           at_get(ldlm_lock_to_ns_at(lock)));

        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        /* We only send real blocking ASTs after the lock is granted */
        lock_res_and_lock(lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                /* copy ast flags like LDLM_FL_DISCARD_DATA */
                body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

                /* We might get here prior to ldlm_handle_enqueue setting
                 * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
                 * into waiting list, but this is safe and similar code in
                 * ldlm_handle_enqueue will call ldlm_lock_cancel() still,
                 * that would not only cancel the lock, but will also remove
                 * it from waiting list */
                if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                        unlock_res_and_lock(lock);
                        ldlm_lock_cancel(lock);
                        instant_cancel = 1;
                        lock_res_and_lock(lock);
                } else {
                        /* start the lock-timeout clock */
                        ldlm_add_waiting_lock(lock);
                }
        }
        unlock_res_and_lock(lock);

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CP_CALLBACK - LDLM_FIRST_OPC);

        rc = ldlm_bl_and_cp_ast_tail(req, arg, lock, instant_cancel);

        RETURN(rc);
}
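
/*
 * Note: the at_measured() call above feeds the observed server-side enqueue
 * wait into the namespace AT (adaptive timeout) estimator, which
 * ldlm_get_enq_timeout() then consults when arming timers for future waiting
 * locks; waits inflated by timed-out callbacks are deliberately excluded.
 */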
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ldlm_resource  *res = lock->l_resource;
        struct ldlm_request   *body;
        struct ptlrpc_request *req;
        int                    rc;
        ENTRY;

        LASSERT(lock != NULL);

        req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
                                        &RQF_LDLM_GL_CALLBACK,
                                        LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        body->lock_handle[0] = lock->l_remote_handle;
        ldlm_lock2desc(lock, &body->lock_desc);

        /* server namespace, doesn't need lock */
        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             lock->l_resource->lr_lvb_len);
        res = lock->l_resource;
        ptlrpc_request_set_replen(req);

        req->rq_send_state = LUSTRE_IMP_FULL;
        /* ptlrpc_request_alloc_pack already set timeout */
        if (AT_OFF)
                req->rq_timeout = ldlm_get_rq_timeout();

        if (lock->l_export && lock->l_export->exp_nid_stats &&
            lock->l_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_GL_CALLBACK - LDLM_FIRST_OPC);

        rc = ptlrpc_queue_wait(req);
        /* Update the LVB from disk if the AST failed (this is a legal race)
         *
         * - Glimpse callback of local lock just returns -ELDLM_NO_LOCK_DATA.
         * - Glimpse callback of remote lock might return -ELDLM_NO_LOCK_DATA
         *   when inode is cleared. LU-274
         */
        if (rc == -ELDLM_NO_LOCK_DATA) {
                LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
                ldlm_res_lvbo_update(res, NULL, 1);
        } else if (rc != 0) {
                rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
        } else {
                rc = ldlm_res_lvbo_update(res, req, 1);
        }

        ptlrpc_req_finished(req);
        if (rc == -ERESTART)
                ldlm_reprocess_all(res);

        RETURN(rc);
}
static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
                              struct lprocfs_stats *srv_stats)
{
        int lock_type = 0, op = 0;

        lock_type = dlm_req->lock_desc.l_resource.lr_type;

        switch (lock_type) {
        case LDLM_PLAIN:
                op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
                break;
        case LDLM_EXTENT:
                if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
                        op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
                else
                        op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
                break;
        case LDLM_FLOCK:
                op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
                break;
        case LDLM_IBITS:
                op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
                break;
        default:
                op = 0;
                break;
        }

        if (op)
                lprocfs_counter_incr(srv_stats, op);
}
/*
 * Main server-side entry point into LDLM. This is called by ptlrpc service
 * threads to carry out client lock enqueueing requests.
 */
int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
                         struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs)
{
        struct ldlm_reply *dlm_rep;
        __u32 flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        int rc = 0;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
        flags = dlm_req->lock_flags;

        LASSERT(req->rq_export);

        if (req->rq_rqbd->rqbd_service->srv_stats)
                ldlm_svc_get_eopc(dlm_req,
                                  req->rq_rqbd->rqbd_service->srv_stats);

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_ENQUEUE - LDLM_FIRST_OPC);

        if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
                     dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
                          dlm_req->lock_desc.l_resource.lr_type);
                GOTO(out, rc = -EFAULT);
        }

        if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
                     dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
                     dlm_req->lock_desc.l_req_mode &
                     (dlm_req->lock_desc.l_req_mode-1))) {
                DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
                          dlm_req->lock_desc.l_req_mode);
                GOTO(out, rc = -EFAULT);
        }

        if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
                if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                             LDLM_PLAIN)) {
                        DEBUG_REQ(D_ERROR, req,
                                  "PLAIN lock request from IBITS client?");
                        GOTO(out, rc = -EPROTO);
                }
        } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                            LDLM_IBITS)) {
                DEBUG_REQ(D_ERROR, req,
                          "IBITS lock request from unaware client?");
                GOTO(out, rc = -EPROTO);
        }

#if 0
        /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
           against server's _CONNECT_SUPPORTED flags? (I don't want to use
           ibits for mgc/mgs) */

        /* INODEBITS_INTEROP: Perform conversion from plain lock to
         * inodebits lock if client does not support them. */
        if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
            (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
                dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
                dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
                        MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
                if (dlm_req->lock_desc.l_req_mode == LCK_PR)
                        dlm_req->lock_desc.l_req_mode = LCK_CR;
        }
#endif

        if (unlikely(flags & LDLM_FL_REPLAY)) {
                /* Find an existing lock in the per-export lock hash */
                lock = cfs_hash_lookup(req->rq_export->exp_lock_hash,
                                       (void *)&dlm_req->lock_handle[0]);
                if (lock != NULL) {
                        DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
                                  LPX64, lock->l_handle.h_cookie);
                        GOTO(existing_lock, rc = 0);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                cbs, NULL, 0);
        if (!lock)
                GOTO(out, rc = -ENOMEM);

        lock->l_last_activity = cfs_time_current_sec();
        lock->l_remote_handle = dlm_req->lock_handle[0];
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
        /* Don't enqueue a lock onto the export if it has been disconnected
         * due to eviction (bug 3822) or server umount (bug 24324).
         * Cancel it now instead. */
        if (req->rq_export->exp_disconnected) {
                LDLM_ERROR(lock, "lock on disconnected export %p",
                           req->rq_export);
                GOTO(out, rc = -ENOTCONN);
        }

        lock->l_export = class_export_lock_get(req->rq_export, lock);
        if (lock->l_export->exp_lock_hash)
                cfs_hash_add(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle,
                             &lock->l_exp_hash);

existing_lock:

        if (flags & LDLM_FL_HAS_INTENT) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
        } else {
                /* based on the assumption that lvb size never changes during
                 * resource life time, otherwise it needs resource->lr_lock's
                 * protection */
                if (lock->l_resource->lr_lvb_len) {
                        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
                                             RCL_SERVER,
                                             lock->l_resource->lr_lvb_len);
                }

                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                        GOTO(out, rc = -ENOMEM);

                rc = req_capsule_server_pack(&req->rq_pill);
                if (rc)
                        GOTO(out, rc);
        }

        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
                ldlm_convert_policy_to_local(req->rq_export,
                                          dlm_req->lock_desc.l_resource.lr_type,
                                          &dlm_req->lock_desc.l_policy_data,
                                          &lock->l_policy_data);
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
                lock->l_req_extent = lock->l_policy_data.l_extent;

        err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
        if (err)
                GOTO(out, err);

        dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        dlm_rep->lock_flags = flags;

        ldlm_lock2desc(lock, &dlm_rep->lock_desc);
        ldlm_lock2handle(lock, &dlm_rep->lock_handle);

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        lock_res_and_lock(lock);

        /* Now take into account flags to be inherited from original lock
           request both in reply to client and in our own lock flags. */
        dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
        lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;

        /* Don't move a pending lock onto the export if it has already been
         * disconnected due to eviction (bug 5683) or server umount (bug 24324).
         * Cancel it now instead. */
        if (unlikely(req->rq_export->exp_disconnected ||
                     OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
                LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                rc = -ENOTCONN;
        } else if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode) {
                        /*
                         * Only cancel lock if it was granted, because it would
                         * be destroyed immediately and would never be granted
                         * in the future, causing timeouts on client.  Not
                         * granted lock will be cancelled immediately after
                         * sending completion AST.
                         */
                        if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
                                unlock_res_and_lock(lock);
                                ldlm_lock_cancel(lock);
                                lock_res_and_lock(lock);
                        } else
                                ldlm_add_waiting_lock(lock);
                }
        }
        /* Make sure we never ever grant usual metadata locks to liblustre
           clients */
        if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
            dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
             req->rq_export->exp_libclient) {
                if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
                             !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
                        CERROR("Granting sync lock to libclient. "
                               "req fl %d, rep fl %d, lock fl "LPX64"\n",
                               dlm_req->lock_flags, dlm_rep->lock_flags,
                               lock->l_flags);
                        LDLM_ERROR(lock, "sync lock");
                        if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
                                struct ldlm_intent *it;

                                it = req_capsule_client_get(&req->rq_pill,
                                                            &RMF_LDLM_INTENT);
                                if (it != NULL)
                                        CERROR("This is intent %s ("LPU64")\n",
                                               ldlm_it2str(it->opc), it->opc);
                        }
                }
        }

        unlock_res_and_lock(lock);

        EXIT;
 out:
        req->rq_status = rc ?: err; /* return either error - bug 11190 */
        if (!req->rq_packed_final) {
                err = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc == 0)
                        rc = err;
        }

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d, rc=%d)", err, rc);

                if (rc == 0) {
                        if (lock->l_resource->lr_lvb_len > 0) {
                                /* MDT path won't handle lr_lvb_data, so
                                 * lock/unlock better be contained in the
                                 * DLM code */
                                void *lvb;

                                lvb = req_capsule_server_get(&req->rq_pill,
                                                             &RMF_DLM_LVB);
                                LASSERTF(lvb != NULL, "req %p, lock %p\n",
                                         req, lock);
                                lock_res(lock->l_resource);
                                memcpy(lvb, lock->l_resource->lr_lvb_data,
                                       lock->l_resource->lr_lvb_len);
                                unlock_res(lock->l_resource);
                        }
                } else {
                        lock_res_and_lock(lock);
                        ldlm_resource_unlink_lock(lock);
                        ldlm_lock_destroy_nolock(lock);
                        unlock_res_and_lock(lock);
                }

                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);

                LDLM_LOCK_RELEASE(lock);
        }

        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
                          lock, rc);

        return rc;
}
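
/*
 * Note on the request-mode sanity check above: a valid ldlm mode is a single
 * bit strictly between LCK_MINMODE and LCK_MAXMODE, which the power-of-two
 * test (mode & (mode - 1)) == 0 enforces, e.g.:
 *
 *      LCK_PW = 0x2  ->  0x2 & 0x1 == 0   (accepted)
 *      0x6 (PW|PR)   ->  0x6 & 0x5 != 0   (rejected with -EFAULT)
 */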
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback,
                        ldlm_glimpse_callback glimpse_callback)
{
        struct ldlm_request *dlm_req;
        struct ldlm_callback_suite cbs = {
                .lcs_completion = completion_callback,
                .lcs_blocking   = blocking_callback,
                .lcs_glimpse    = glimpse_callback
        };
        int rc;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req != NULL) {
                rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
                                          req, dlm_req, &cbs);
        } else {
                rc = -EFAULT;
        }
        return rc;
}
int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req)
{
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc;
        ENTRY;

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CONVERT - LDLM_FIRST_OPC);

        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc)
                RETURN(rc);

        dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                void *res = NULL;

                LDLM_DEBUG(lock, "server-side convert handler START");

                lock->l_last_activity = cfs_time_current_sec();
                res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                        &dlm_rep->lock_flags);
                if (res) {
                        if (ldlm_del_waiting_lock(lock))
                                LDLM_DEBUG(lock, "converted waiting lock");
                        req->rq_status = 0;
                } else {
                        req->rq_status = EDEADLOCK;
                }
        }

        if (lock) {
                if (!req->rq_status)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        int rc;
        struct ldlm_request *dlm_req;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req != NULL) {
                rc = ldlm_handle_convert0(req, dlm_req);
        } else {
                CERROR("Can't unpack dlm_req\n");
                rc = -EFAULT;
        }
        return rc;
}
/* Cancel all the locks whose handles are packed into ldlm_request */
int ldlm_request_cancel(struct ptlrpc_request *req,
                        const struct ldlm_request *dlm_req, int first)
{
        struct ldlm_resource *res, *pres = NULL;
        struct ldlm_lock *lock;
        int i, count, done = 0;
        ENTRY;

        count = dlm_req->lock_count ? dlm_req->lock_count : 1;
        if (first >= count)
                RETURN(0);

        /* There is no lock on the server at the replay time,
         * skip lock cancelling to make replay tests pass. */
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                RETURN(0);

        LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
                          "starting at %d", count, first);

        for (i = first; i < count; i++) {
                lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
                if (!lock) {
                        LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
                                          "lock (cookie "LPU64")",
                                          dlm_req->lock_handle[i].cookie);
                        continue;
                }

                res = lock->l_resource;
                done++;

                if (res != pres) {
                        if (pres != NULL) {
                                ldlm_reprocess_all(pres);
                                LDLM_RESOURCE_DELREF(pres);
                                ldlm_resource_putref(pres);
                        }
                        if (res != NULL) {
                                ldlm_resource_getref(res);
                                LDLM_RESOURCE_ADDREF(res);
                                ldlm_res_lvbo_update(res, NULL, 1);
                        }
                        pres = res;
                }
                ldlm_lock_cancel(lock);
                LDLM_LOCK_PUT(lock);
        }
        if (pres != NULL) {
                ldlm_reprocess_all(pres);
                LDLM_RESOURCE_DELREF(pres);
                ldlm_resource_putref(pres);
        }
        LDLM_DEBUG_NOLOCK("server-side cancel handler END");
        RETURN(done);
}
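
/*
 * Note: the pres/res bookkeeping above batches work per resource -
 * ldlm_reprocess_all() and the lvbo update run once per distinct resource in
 * the handle array rather than once per lock, which matters for wide cancel
 * RPCs that pack many locks of the same resource.
 */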
int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        int rc;
        ENTRY;

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL) {
                CDEBUG(D_INFO, "bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        if (req->rq_export && req->rq_export->exp_nid_stats &&
            req->rq_export->exp_nid_stats->nid_ldlm_stats)
                lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
                                     LDLM_CANCEL - LDLM_FIRST_OPC);

        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc)
                RETURN(rc);

        if (!ldlm_request_cancel(req, dlm_req, 0))
                req->rq_status = ESTALE;

        RETURN(ptlrpc_reply(req));
}
#endif /* HAVE_SERVER_SUPPORT */
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        LDLM_DEBUG(lock, "client blocking AST callback handler");

        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;

        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
                lock->l_flags |= LDLM_FL_CANCEL;

        do_ast = (!lock->l_readers && !lock->l_writers);
        unlock_res_and_lock(lock);

        if (do_ast) {
                CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
                       lock, lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL)
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
        } else {
                CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
                       lock);
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        LDLM_LOCK_RELEASE(lock);
        EXIT;
}
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        CFS_LIST_HEAD(ast_list);
        ENTRY;

        LDLM_DEBUG(lock, "client completion callback handler START");

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
                int to = cfs_time_seconds(1);
                while (to > 0) {
                        cfs_schedule_timeout_and_set_state(
                                CFS_TASK_INTERRUPTIBLE, to);
                        if (lock->l_granted_mode == lock->l_req_mode ||
                            lock->l_destroyed)
                                break;
                }
        }

        lock_res_and_lock(lock);
        if (lock->l_destroyed ||
            lock->l_granted_mode == lock->l_req_mode) {
                /* bug 11300: the lock has already been granted */
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "Double grant race happened");
                LDLM_LOCK_RELEASE(lock);
                EXIT;
                return;
        }

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN) {
                ldlm_convert_policy_to_local(req->rq_export,
                                          dlm_req->lock_desc.l_resource.lr_type,
                                          &dlm_req->lock_desc.l_policy_data,
                                          &lock->l_policy_data);
                LDLM_DEBUG(lock, "completion AST, new policy data");
        }

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                unlock_res_and_lock(lock);
                if (ldlm_lock_change_resource(ns, lock,
                                &dlm_req->lock_desc.l_resource.lr_name) != 0) {
                        LDLM_ERROR(lock, "Failed to allocate resource");
                        LDLM_LOCK_RELEASE(lock);
                        EXIT;
                        return;
                }
                LDLM_DEBUG(lock, "completion AST, new resource");
                CERROR("change resource!\n");
                lock_res_and_lock(lock);
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                /* BL_AST locks are not needed in lru.
                 * let ldlm_cancel_lru() be fast. */
                ldlm_lock_remove_from_lru(lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        if (lock->l_lvb_len) {
                if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                         RCL_CLIENT) < lock->l_lvb_len) {
                        LDLM_ERROR(lock, "completion AST did not contain "
                                   "expected LVB!");
                } else {
                        void *lvb = req_capsule_client_get(&req->rq_pill,
                                                           &RMF_DLM_LVB);
                        memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
                }
        }

        ldlm_grant_lock(lock, &ast_list);
        unlock_res_and_lock(lock);

        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");

        /* Let enqueue call osc_lock_upcall() and initialize
         * l_ast_data */
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);

        ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        LDLM_LOCK_RELEASE(lock);
        EXIT;
}
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;
        ENTRY;

        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL)
                rc = lock->l_glimpse_ast(lock, req);

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        lock_res_and_lock(lock);
        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            cfs_time_after(cfs_time_current(),
                           cfs_time_add(lock->l_last_used,
                                        cfs_time_seconds(10)))) {
                unlock_res_and_lock(lock);
                if (ldlm_bl_to_thread_lock(ns, NULL, lock))
                        ldlm_handle_bl_callback(ns, NULL, lock);

                EXIT;
                return;
        }
        unlock_res_and_lock(lock);
        LDLM_LOCK_RELEASE(lock);
        EXIT;
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        if (req->rq_no_reply)
                return 0;

        req->rq_status = rc;
        if (!req->rq_packed_final) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        return rc;
        }
        return ptlrpc_reply(req);
}
#ifdef __KERNEL__
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
{
        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
        ENTRY;

        cfs_spin_lock(&blp->blp_lock);
        if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
        } else {
                /* other blocking callbacks are added to the regular list */
                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        }
        cfs_spin_unlock(&blp->blp_lock);

        cfs_waitq_signal(&blp->blp_waitq);

        /* can not use blwi->blwi_mode as blwi could be already freed in
           LDLM_ASYNC mode */
        if (mode == LDLM_SYNC)
                cfs_wait_for_completion(&blwi->blwi_comp);

        RETURN(0);
}
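
/*
 * Note: LDLM_SYNC callers stack-allocate the work item and block on blwi_comp
 * until a blocking thread completes it, while LDLM_ASYNC callers heap-allocate
 * it and return immediately; that is why the mode is passed explicitly above
 * instead of reading blwi->blwi_mode after the enqueue.
 */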
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
                             struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld,
                             cfs_list_t *cancels, int count,
                             struct ldlm_lock *lock,
                             int mode)
{
        cfs_init_completion(&blwi->blwi_comp);
        CFS_INIT_LIST_HEAD(&blwi->blwi_head);

        if (cfs_memory_pressure_get())
                blwi->blwi_mem_pressure = 1;

        blwi->blwi_ns = ns;
        blwi->blwi_mode = mode;
        if (ld != NULL)
                blwi->blwi_ld = *ld;
        if (count) {
                cfs_list_add(&blwi->blwi_head, cancels);
                cfs_list_del_init(cancels);
                blwi->blwi_count = count;
        } else {
                blwi->blwi_lock = lock;
        }
}
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
                             cfs_list_t *cancels, int count, int mode)
{
        ENTRY;

        if (cancels && count == 0)
                RETURN(0);

        if (mode == LDLM_SYNC) {
                /* if it is synchronous call do minimum mem alloc, as it could
                 * be triggered from kernel shrinker
                 */
                struct ldlm_bl_work_item blwi;
                memset(&blwi, 0, sizeof(blwi));
                init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
                RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
        } else {
                struct ldlm_bl_work_item *blwi;
                OBD_ALLOC(blwi, sizeof(*blwi));
                if (blwi == NULL)
                        RETURN(-ENOMEM);
                init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);

                RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
        }
}
#endif /* __KERNEL__ */
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                           struct ldlm_lock *lock)
{
#ifdef __KERNEL__
        RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
#else
        RETURN(-ENOSYS);
#endif
}

int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                           cfs_list_t *cancels, int count, int mode)
{
#ifdef __KERNEL__
        RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
#else
        RETURN(-ENOSYS);
#endif
}
/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
static int ldlm_handle_setinfo(struct ptlrpc_request *req)
{
        struct obd_device *obd = req->rq_export->exp_obd;
        char *key;
        void *val;
        int keylen, vallen;
        int rc = -ENOSYS;
        ENTRY;

        DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);

        req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);

        key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_IOCTL, req, "no set_info key");
                RETURN(-EFAULT);
        }
        keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);
        val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_IOCTL, req, "no set_info val");
                RETURN(-EFAULT);
        }
        vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
                                      RCL_CLIENT);

        /* We are responsible for swabbing contents of val */

        if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
                /* Pass it on to mdc (the "export" in this case) */
                rc = obd_set_info_async(req->rq_svc_thread->t_env,
                                        req->rq_export,
                                        sizeof(KEY_HSM_COPYTOOL_SEND),
                                        KEY_HSM_COPYTOOL_SEND,
                                        vallen, val, NULL);
        else
                DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);

        return rc;
}
static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
                                        const char *msg, int rc,
                                        struct lustre_handle *handle)
{
        DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
                  "%s: [nid %s] [rc %d] [lock "LPX64"]",
                  msg, libcfs_id2str(req->rq_peer), rc,
                  handle ? handle->cookie : 0);
        if (req->rq_no_reply)
                CWARN("No reply was sent, maybe cause bug 21636.\n");
        else if (rc)
                CWARN("Send reply failed, maybe cause bug 21636.\n");
}
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        /* do nothing for sec context finalize */
        if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
                RETURN(0);

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);

        if (req->rq_export == NULL) {
                rc = ldlm_callback_reply(req, -ENOTCONN);
                ldlm_callback_errmsg(req, "Operate on unconnected server",
                                     rc, NULL);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case LDLM_BL_CALLBACK:
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
                        RETURN(0);
                break;
        case LDLM_CP_CALLBACK:
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
                        RETURN(0);
                break;
        case LDLM_GL_CALLBACK:
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
                        RETURN(0);
                break;
        case LDLM_SET_INFO:
                rc = ldlm_handle_setinfo(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
                CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
                req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
                        RETURN(0);
                rc = llog_origin_handle_cancel(req);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
                        RETURN(0);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case OBD_QC_CALLBACK:
                req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
                        RETURN(0);
                rc = target_handle_qc_callback(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case QUOTA_DQACQ:
        case QUOTA_DQREL:
                /* reply in handler */
                req_capsule_set(&req->rq_pill, &RQF_MDS_QUOTA_DQACQ);
                rc = target_handle_dqacq_callback(req);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CREATE:
                req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_create(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
                req_capsule_set(&req->rq_pill,
                                &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
                req_capsule_set(&req->rq_pill,
                                &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        case LLOG_ORIGIN_HANDLE_CLOSE:
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
                        RETURN(0);
                rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("unknown opcode %u\n",
                       lustre_msg_get_opc(req->rq_reqmsg));
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL) {
                rc = ldlm_callback_reply(req, -EPROTO);
                ldlm_callback_errmsg(req, "Operate without parameter", rc,
                                     NULL);
                RETURN(0);
        }

        /* Force a known safe race, send a cancel to the server for a lock
         * which the server has already started a blocking callback on. */
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
            lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
                rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
                if (rc < 0)
                        CERROR("ldlm_cli_cancel: %d\n", rc);
        }

        lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
        if (!lock) {
                CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
                       "disappeared\n", dlm_req->lock_handle[0].cookie);
                rc = ldlm_callback_reply(req, -EINVAL);
                ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
                                     &dlm_req->lock_handle[0]);
                RETURN(0);
        }

        if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
            lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
                OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock_res_and_lock(lock);
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
        if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
                /* If somebody cancels lock and cache is already dropped,
                 * or lock is failed before cp_ast received on client,
                 * we can tell the server we have no lock. Otherwise, we
                 * should send cancel after dropping the cache. */
                if (((lock->l_flags & LDLM_FL_CANCELING) &&
                    (lock->l_flags & LDLM_FL_BL_DONE)) ||
                    (lock->l_flags & LDLM_FL_FAILED)) {
                        LDLM_DEBUG(lock, "callback on lock "
                                   LPX64" - lock disappeared\n",
                                   dlm_req->lock_handle[0].cookie);
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_RELEASE(lock);
                        rc = ldlm_callback_reply(req, -EINVAL);
                        ldlm_callback_errmsg(req, "Operate on stale lock", rc,
                                             &dlm_req->lock_handle[0]);
                        RETURN(0);
                }
                /* BL_AST locks are not needed in lru.
                 * let ldlm_cancel_lru() be fast. */
                ldlm_lock_remove_from_lru(lock);
                lock->l_flags |= LDLM_FL_BL_AST;
        }
        unlock_res_and_lock(lock);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
                req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
                if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
                        rc = ldlm_callback_reply(req, 0);
                        if (req->rq_no_reply || rc)
                                ldlm_callback_errmsg(req, "Normal process", rc,
                                                     &dlm_req->lock_handle[0]);
                }
                if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
                        ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
                ldlm_callback_reply(req, 0);
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        case LDLM_GL_CALLBACK:
                CDEBUG(D_INODE, "glimpse ast\n");
                req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
                ldlm_handle_gl_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG();                         /* checked above */
        }

        RETURN(0);
}
#ifdef HAVE_SERVER_SUPPORT
static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CERROR("%s from %s arrived at %lu with bad export cookie "
                       LPU64"\n",
                       ll_opcode2str(lustre_msg_get_opc(req->rq_reqmsg)),
                       libcfs_nid2str(req->rq_peer.nid),
                       req->rq_arrival_time.tv_sec,
                       lustre_msg_get_handle(req->rq_reqmsg)->cookie);

                if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL) {
                        req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
                        dlm_req = req_capsule_client_get(&req->rq_pill,
                                                         &RMF_DLM_REQ);
                        if (dlm_req != NULL)
                                ldlm_lock_dump_handle(D_ERROR,
                                                      &dlm_req->lock_handle[0]);
                }
                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
                CDEBUG(D_INODE, "cancel\n");
                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
                        RETURN(0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);
        case OBD_LOG_CANCEL:
                req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
                        RETURN(0);
                rc = llog_origin_handle_cancel(req);
                if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
                        RETURN(0);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        default:
                CERROR("invalid opcode %d\n",
                       lustre_msg_get_opc(req->rq_reqmsg));
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
                ldlm_callback_reply(req, -EINVAL);
        }

        RETURN(0);
}
2107 static int ldlm_cancel_hpreq_lock_match(struct ptlrpc_request *req,
2108 struct ldlm_lock *lock)
2110 struct ldlm_request *dlm_req;
2111 struct lustre_handle lockh;
2116 dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
2117 if (dlm_req == NULL)
2120 ldlm_lock2handle(lock, &lockh);
2121 for (i = 0; i < dlm_req->lock_count; i++) {
2122 if (lustre_handle_equal(&dlm_req->lock_handle[i],
2124 DEBUG_REQ(D_RPCTRACE, req,
2125 "Prio raised by lock "LPX64".", lockh.cookie);
static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        int rc = 0;
        int i;
        ENTRY;

        /* no prolong in recovery */
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                RETURN(0);

        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        if (dlm_req == NULL)
                RETURN(-EFAULT);

        for (i = 0; i < dlm_req->lock_count; i++) {
                struct ldlm_lock *lock;

                lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
                if (lock == NULL)
                        continue;

                rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
                if (rc)
                        LDLM_DEBUG(lock, "hpreq cancel lock");
                LDLM_LOCK_PUT(lock);

                if (rc)
                        break;
        }

        RETURN(rc);
}
static struct ptlrpc_hpreq_ops ldlm_cancel_hpreq_ops = {
        .hpreq_lock_match = ldlm_cancel_hpreq_lock_match,
        .hpreq_check      = ldlm_cancel_hpreq_check
};
static int ldlm_hpreq_handler(struct ptlrpc_request *req)
{
        ENTRY;

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);

        if (req->rq_export == NULL)
                RETURN(0);

        if (LDLM_CANCEL == lustre_msg_get_opc(req->rq_reqmsg)) {
                req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
                req->rq_ops = &ldlm_cancel_hpreq_ops;
        }
        RETURN(0);
}
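/*
 * Simplified view of how the ops installed above are used (the actual
 * dispatch lives in the ptlrpc layer, not here): once ldlm_hpreq_handler()
 * attaches ldlm_cancel_hpreq_ops to an incoming LDLM_CANCEL request,
 * ptlrpc may call
 *
 *         req->rq_ops->hpreq_lock_match(req, lock);
 *
 * to ask whether this cancel references a given lock, and
 *
 *         req->rq_ops->hpreq_check(req);
 *
 * which returns nonzero when the request cancels a lock that already has a
 * blocking AST sent (LDLM_FL_AST_SENT), so such cancels can be promoted to
 * high priority instead of queueing behind ordinary requests.
 */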
int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                        cfs_hlist_node_t *hnode, void *data)
{
        cfs_list_t         *rpc_list = data;
        struct ldlm_lock   *lock = cfs_hash_object(hs, hnode);

        lock_res_and_lock(lock);

        if (lock->l_req_mode != lock->l_granted_mode) {
                unlock_res_and_lock(lock);
                return 0;
        }

        LASSERT(lock->l_resource);
        if (lock->l_resource->lr_type != LDLM_IBITS &&
            lock->l_resource->lr_type != LDLM_PLAIN) {
                unlock_res_and_lock(lock);
                return 0;
        }

        if (lock->l_flags & LDLM_FL_AST_SENT) {
                unlock_res_and_lock(lock);
                return 0;
        }

        LASSERT(lock->l_blocking_ast);
        LASSERT(!lock->l_blocking_lock);

        lock->l_flags |= LDLM_FL_AST_SENT;
        if (lock->l_export && lock->l_export->exp_lock_hash &&
            !cfs_hlist_unhashed(&lock->l_exp_hash))
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);
        cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
        LDLM_LOCK_GET(lock);

        unlock_res_and_lock(lock);
        return 0;
}
void ldlm_revoke_export_locks(struct obd_export *exp)
{
        cfs_list_t  rpc_list;
        ENTRY;

        CFS_INIT_LIST_HEAD(&rpc_list);
        cfs_hash_for_each_empty(exp->exp_lock_hash,
                                ldlm_revoke_lock_cb, &rpc_list);
        ldlm_run_ast_work(exp->exp_obd->obd_namespace, &rpc_list,
                          LDLM_WORK_REVOKE_AST);

        EXIT;
}
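/*
 * Usage sketch (hypothetical caller, illustration only): a server that
 * wants every granted IBITS/PLAIN lock of one export revoked, for example
 * while evicting a misbehaving client, simply calls
 *
 *         ldlm_revoke_export_locks(exp);
 *
 * ldlm_revoke_lock_cb() collects each eligible lock under its resource
 * lock and marks it LDLM_FL_AST_SENT; ldlm_run_ast_work() then issues the
 * blocking ASTs outside of the hash and resource locks.
 */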
#endif /* HAVE_SERVER_SUPPORT */
#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;
        static unsigned int num_bl = 0;

        cfs_spin_lock(&blp->blp_lock);
        /* process a request from blp_list at least once per
         * blp_num_threads dequeues */
        if (!cfs_list_empty(&blp->blp_list) &&
            (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
                blwi = cfs_list_entry(blp->blp_list.next,
                                      struct ldlm_bl_work_item, blwi_entry);
        else
                if (!cfs_list_empty(&blp->blp_prio_list))
                        blwi = cfs_list_entry(blp->blp_prio_list.next,
                                              struct ldlm_bl_work_item,
                                              blwi_entry);

        if (blwi) {
                if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
                        num_bl = 0;
                cfs_list_del(&blwi->blwi_entry);
        }
        cfs_spin_unlock(&blp->blp_lock);

        return blwi;
}
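/*
 * Worked example of the round-robin above (illustrative numbers): with
 * blp_num_threads == 4, num_bl cycles through 0,1,2,3,0,...  The blp_list
 * branch is taken whenever num_bl == 0 (or the priority list is empty), so
 * even under a steady stream of priority callbacks on blp_prio_list at
 * least one work item in four is drawn from the ordinary blp_list and
 * regular callbacks cannot be starved indefinitely.
 */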
/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
        char                    bltd_name[CFS_CURPROC_COMM_MAX];
        struct ldlm_bl_pool     *bltd_blp;
        cfs_completion_t        bltd_comp;
        int                     bltd_num;
};
static int ldlm_bl_thread_main(void *arg);

static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
        int rc;

        cfs_init_completion(&bltd.bltd_comp);
        rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
        if (rc < 0) {
                CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
                       cfs_atomic_read(&blp->blp_num_threads), rc);
                return rc;
        }
        cfs_wait_for_completion(&bltd.bltd_comp);

        return 0;
}
static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_pool *blp;
        ENTRY;

        {
                struct ldlm_bl_thread_data *bltd = arg;

                blp = bltd->bltd_blp;

                bltd->bltd_num =
                        cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
                cfs_atomic_inc(&blp->blp_busy_threads);

                snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
                         "ldlm_bl_%02d", bltd->bltd_num);
                cfs_daemonize(bltd->bltd_name);

                cfs_complete(&bltd->bltd_comp);
                /* cannot use bltd after this, it is only on caller's stack */
        }

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;
                int busy;

                blwi = ldlm_bl_get_work(blp);

                if (blwi == NULL) {
                        cfs_atomic_dec(&blp->blp_busy_threads);
                        l_wait_event_exclusive(blp->blp_waitq,
                                         (blwi = ldlm_bl_get_work(blp)) != NULL,
                                         &lwi);
                        busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
                } else {
                        busy = cfs_atomic_read(&blp->blp_busy_threads);
                }

                if (blwi->blwi_ns == NULL)
                        /* added by ldlm_cleanup() */
                        break;

                /* Not fatal if racy and we end up with a few too many
                 * threads */
                if (unlikely(busy < blp->blp_max_threads &&
                             busy >= cfs_atomic_read(&blp->blp_num_threads) &&
                             !blwi->blwi_mem_pressure))
                        /* discard the return value, we tried */
                        ldlm_bl_thread_start(blp);

                if (blwi->blwi_mem_pressure)
                        cfs_memory_pressure_set();

                if (blwi->blwi_count) {
                        int count;
                        /* Special case: when we cancel locks in the LRU
                         * asynchronously, we pass a list of locks here.
                         * These locks are marked LDLM_FL_CANCELING, but
                         * NOT cancelled locally yet. */
                        count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
                                                           blwi->blwi_count,
                                                           LCF_BL_AST);
                        ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
                } else {
                        ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                                blwi->blwi_lock);
                }
                if (blwi->blwi_mem_pressure)
                        cfs_memory_pressure_clr();

                if (blwi->blwi_mode == LDLM_ASYNC)
                        OBD_FREE(blwi, sizeof(*blwi));
                else
                        cfs_complete(&blwi->blwi_comp);
        }

        cfs_atomic_dec(&blp->blp_busy_threads);
        cfs_atomic_dec(&blp->blp_num_threads);
        cfs_complete(&blp->blp_comp);
        RETURN(0);
}

#endif /* __KERNEL__ */
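/*
 * Note on the blwi_count branch above (simplified): for asynchronous LRU
 * cancellation the caller queues a whole list of locks that are already
 * marked LDLM_FL_CANCELING but not yet cancelled locally.
 * ldlm_cli_cancel_list_local() completes the local cancellation under the
 * LCF_BL_AST policy, and ldlm_cli_cancel_list() then sends the cancel RPCs
 * to the servers, so a single work item can retire many locks at once
 * instead of one at a time.
 */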
static int ldlm_setup(void);
static int ldlm_cleanup(void);

int ldlm_get_ref(void)
{
        int rc = 0;
        ENTRY;
        cfs_mutex_lock(&ldlm_ref_mutex);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        cfs_mutex_unlock(&ldlm_ref_mutex);

        RETURN(rc);
}
void ldlm_put_ref(void)
{
        ENTRY;
        cfs_mutex_lock(&ldlm_ref_mutex);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup();
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        cfs_mutex_unlock(&ldlm_ref_mutex);

        EXIT;
}
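/*
 * Usage sketch (hypothetical caller, illustration only): DLM users bracket
 * their lifetime with the reference calls above, so the services, thread
 * pool and hashes created by ldlm_setup() exist exactly as long as someone
 * needs them:
 *
 *         rc = ldlm_get_ref();
 *         if (rc != 0)
 *                 return rc;
 *         ... use the lock manager ...
 *         ldlm_put_ref();
 *
 * The first ldlm_get_ref() triggers ldlm_setup(); the last ldlm_put_ref()
 * triggers ldlm_cleanup().
 */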
/*
 * Export handle<->lock hash operations.
 */
static unsigned
ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
{
        return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
}

static void *
ldlm_export_lock_key(cfs_hlist_node_t *hnode)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        return &lock->l_remote_handle;
}

static void
ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        lock->l_remote_handle = *(struct lustre_handle *)key;
}

static int
ldlm_export_lock_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
}
static void *
ldlm_export_lock_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
}

static void
ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        LDLM_LOCK_GET(lock);
}

static void
ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct ldlm_lock *lock;

        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
        LDLM_LOCK_RELEASE(lock);
}
static cfs_hash_ops_t ldlm_export_lock_ops = {
        .hs_hash        = ldlm_export_lock_hash,
        .hs_key         = ldlm_export_lock_key,
        .hs_keycmp      = ldlm_export_lock_keycmp,
        .hs_keycpy      = ldlm_export_lock_keycpy,
        .hs_object      = ldlm_export_lock_object,
        .hs_get         = ldlm_export_lock_get,
        .hs_put         = ldlm_export_lock_put,
        .hs_put_locked  = ldlm_export_lock_put,
};
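/*
 * Illustrative lookup through the table above (simplified, and assuming
 * the generic cfs_hash_lookup() interface): given the remote handle sent
 * by a client, the matching lock could be found with
 *
 *         struct ldlm_lock *lock;
 *
 *         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_handle);
 *
 * cfs_hash buckets the handle cookie via ldlm_export_lock_hash(), compares
 * candidates with ldlm_export_lock_keycmp(), and takes a reference through
 * .hs_get before returning the object, which the caller eventually drops
 * through .hs_put (LDLM_LOCK_RELEASE()).
 */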
int ldlm_init_export(struct obd_export *exp)
{
        ENTRY;

        exp->exp_lock_hash =
                cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
                                HASH_EXP_LOCK_CUR_BITS,
                                HASH_EXP_LOCK_MAX_BITS,
                                HASH_EXP_LOCK_BKT_BITS, 0,
                                CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                                &ldlm_export_lock_ops,
                                CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
                                CFS_HASH_NBLK_CHANGE);

        if (!exp->exp_lock_hash)
                RETURN(-ENOMEM);

        RETURN(0);
}
EXPORT_SYMBOL(ldlm_init_export);
void ldlm_destroy_export(struct obd_export *exp)
{
        ENTRY;
        cfs_hash_putref(exp->exp_lock_hash);
        exp->exp_lock_hash = NULL;
        EXIT;
}
EXPORT_SYMBOL(ldlm_destroy_export);
static int ldlm_setup(void)
{
        static struct ptlrpc_service_conf       conf;
        struct ldlm_bl_pool                     *blp = NULL;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm_state != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
        if (ldlm_state == NULL)
                RETURN(-ENOMEM);

#ifdef LPROCFS
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out, rc);
#endif

        memset(&conf, 0, sizeof(conf));
        conf = (typeof(conf)) {
                .psc_name               = "ldlm_cbd",
                .psc_watchdog_factor    = 2,
                .psc_buf                = {
                        .bc_nbufs               = LDLM_NBUFS,
                        .bc_buf_size            = LDLM_BUFSIZE,
                        .bc_req_max_size        = LDLM_MAXREQSIZE,
                        .bc_rep_max_size        = LDLM_MAXREPSIZE,
                        .bc_req_portal          = LDLM_CB_REQUEST_PORTAL,
                        .bc_rep_portal          = LDLM_CB_REPLY_PORTAL,
                },
                .psc_thr                = {
                        .tc_thr_name            = "ldlm_cb",
                        .tc_nthrs_min           = LDLM_THREADS_AUTO_MIN,
                        .tc_nthrs_max           = LDLM_THREADS_AUTO_MAX,
                        .tc_nthrs_user          = ldlm_num_threads,
                        .tc_ctx_tags            = LCT_MD_THREAD | \
                                                  LCT_DT_THREAD,
                },
                .psc_ops                = {
                        .so_req_handler         = ldlm_callback_handler,
                },
        };
        ldlm_state->ldlm_cb_service = \
                        ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
        if (IS_ERR(ldlm_state->ldlm_cb_service)) {
                CERROR("failed to start service\n");
                rc = PTR_ERR(ldlm_state->ldlm_cb_service);
                ldlm_state->ldlm_cb_service = NULL;
                GOTO(out, rc);
        }
#ifdef HAVE_SERVER_SUPPORT
        memset(&conf, 0, sizeof(conf));
        conf = (typeof(conf)) {
                .psc_name               = "ldlm_canceld",
                .psc_watchdog_factor    = 6,
                .psc_buf                = {
                        .bc_nbufs               = LDLM_NBUFS,
                        .bc_buf_size            = LDLM_BUFSIZE,
                        .bc_req_max_size        = LDLM_MAXREQSIZE,
                        .bc_rep_max_size        = LDLM_MAXREPSIZE,
                        .bc_req_portal          = LDLM_CANCEL_REQUEST_PORTAL,
                        .bc_rep_portal          = LDLM_CANCEL_REPLY_PORTAL,

                },
                .psc_thr                = {
                        .tc_thr_name            = "ldlm_cn",
                        .tc_nthrs_min           = LDLM_THREADS_AUTO_MIN,
                        .tc_nthrs_max           = LDLM_THREADS_AUTO_MAX,
                        .tc_nthrs_user          = ldlm_num_threads,
                        .tc_ctx_tags            = LCT_MD_THREAD | \
                                                  LCT_DT_THREAD | \
                                                  LCT_CL_THREAD,
                },
                .psc_ops                = {
                        .so_req_handler         = ldlm_cancel_handler,
                        .so_hpreq_handler       = ldlm_hpreq_handler,
                },
        };
        ldlm_state->ldlm_cancel_service = \
                        ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
        if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
                CERROR("failed to start service\n");
                rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
                ldlm_state->ldlm_cancel_service = NULL;
                GOTO(out, rc);
        }
#endif /* HAVE_SERVER_SUPPORT */
        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out, rc = -ENOMEM);
        ldlm_state->ldlm_bl_pool = blp;

        cfs_spin_lock_init(&blp->blp_lock);
        CFS_INIT_LIST_HEAD(&blp->blp_list);
        CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
        cfs_waitq_init(&blp->blp_waitq);
        cfs_atomic_set(&blp->blp_num_threads, 0);
        cfs_atomic_set(&blp->blp_busy_threads, 0);

#ifdef __KERNEL__
        if (ldlm_num_threads == 0) {
                blp->blp_min_threads = LDLM_THREADS_AUTO_MIN;
                blp->blp_max_threads = LDLM_THREADS_AUTO_MAX;
        } else {
                blp->blp_min_threads = blp->blp_max_threads = \
                        min_t(int, LDLM_THREADS_AUTO_MAX,
                              max_t(int, LDLM_THREADS_AUTO_MIN,
                                    ldlm_num_threads));
        }

        for (i = 0; i < blp->blp_min_threads; i++) {
                rc = ldlm_bl_thread_start(blp);
                if (rc < 0)
                        GOTO(out, rc);
        }
# ifdef HAVE_SERVER_SUPPORT
        CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        expired_lock_thread.elt_state = ELT_STOPPED;
        cfs_waitq_init(&expired_lock_thread.elt_waitq);

        CFS_INIT_LIST_HEAD(&waiting_locks_list);
        cfs_spin_lock_init(&waiting_locks_spinlock);
        cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);

        rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out, rc);
        }

        cfs_wait_event(expired_lock_thread.elt_waitq,
                       expired_lock_thread.elt_state == ELT_READY);
# endif /* HAVE_SERVER_SUPPORT */
        rc = ldlm_pools_init();
        if (rc)
                GOTO(out, rc);
#endif

        RETURN(0);

out:
        ldlm_cleanup();
        RETURN(rc);
}
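/*
 * Worked example of the thread-count clamp above (illustrative values):
 * ldlm_num_threads=1 is raised to LDLM_THREADS_AUTO_MIN and
 * ldlm_num_threads=512 is capped at LDLM_THREADS_AUTO_MAX, with
 * blp_min_threads == blp_max_threads so the pool size stays fixed.
 * ldlm_num_threads=0 (the default) keeps the full
 * [LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX] range and lets
 * ldlm_bl_thread_main() grow the pool on demand when all started threads
 * are busy.
 */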
static int ldlm_cleanup(void)
{
        ENTRY;

        if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
            !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
                ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        ldlm_pools_fini();
        if (ldlm_state->ldlm_bl_pool != NULL) {
                struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

                while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
                        struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                        cfs_init_completion(&blp->blp_comp);

                        cfs_spin_lock(&blp->blp_lock);
                        cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                        cfs_waitq_signal(&blp->blp_waitq);
                        cfs_spin_unlock(&blp->blp_lock);

                        cfs_wait_for_completion(&blp->blp_comp);
                }

                OBD_FREE(blp, sizeof(*blp));
        }
#endif /* __KERNEL__ */
        if (ldlm_state->ldlm_cb_service != NULL)
                ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
# ifdef HAVE_SERVER_SUPPORT
        if (ldlm_state->ldlm_cancel_service != NULL)
                ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
# endif

        ldlm_proc_cleanup();

#ifdef __KERNEL__
        if (expired_lock_thread.elt_state != ELT_STOPPED) {
                expired_lock_thread.elt_state = ELT_TERMINATE;
                cfs_waitq_signal(&expired_lock_thread.elt_waitq);
                cfs_wait_event(expired_lock_thread.elt_waitq,
                               expired_lock_thread.elt_state == ELT_STOPPED);
        }
#endif /* __KERNEL__ */

        OBD_FREE(ldlm_state, sizeof(*ldlm_state));
        ldlm_state = NULL;

        RETURN(0);
}
int ldlm_init(void)
{
        cfs_mutex_init(&ldlm_ref_mutex);
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
        ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
                              sizeof(struct ldlm_lock), 0,
                              CFS_SLAB_HWCACHE_ALIGN | CFS_SLAB_DESTROY_BY_RCU);
        if (ldlm_lock_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        ldlm_interval_slab = cfs_mem_cache_create("interval_node",
                                        sizeof(struct ldlm_interval),
                                        0, CFS_SLAB_HWCACHE_ALIGN);
        if (ldlm_interval_slab == NULL) {
                cfs_mem_cache_destroy(ldlm_resource_slab);
                cfs_mem_cache_destroy(ldlm_lock_slab);
                return -ENOMEM;
        }
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        class_export_dump_hook = ldlm_dump_export_locks;
#endif
        return 0;
}
void ldlm_exit(void)
{
        int rc;

        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        rc = cfs_mem_cache_destroy(ldlm_resource_slab);
        LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
#ifdef __KERNEL__
        /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
         * call synchronize_rcu() to wait until a grace period has elapsed
         * and ldlm_lock_free() has had a chance to be called. */
        synchronize_rcu();
#endif
        rc = cfs_mem_cache_destroy(ldlm_lock_slab);
        LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
        rc = cfs_mem_cache_destroy(ldlm_interval_slab);
        LASSERTF(rc == 0, "couldn't free interval node slab\n");
}
/* ldlm_extent.c */
EXPORT_SYMBOL(ldlm_extent_shift_kms);

/* ldlm_lock.c */
#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(ldlm_get_processing_policy);
#endif
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_addref_try);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
EXPORT_SYMBOL(ldlm_lock_allow_match);
EXPORT_SYMBOL(ldlm_lock_downgrade);
EXPORT_SYMBOL(ldlm_lock_convert);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast_async);
EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_blocking_ast);
EXPORT_SYMBOL(ldlm_glimpse_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
EXPORT_SYMBOL(ldlm_prep_elc_req);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
EXPORT_SYMBOL(ldlm_cli_enqueue_local);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
EXPORT_SYMBOL(ldlm_cli_cancel_req);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_resource_iterate);
EXPORT_SYMBOL(ldlm_cancel_resource_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list);

/* ldlm_lockd.c */
#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_enqueue0);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_request_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_handle_convert0);
EXPORT_SYMBOL(ldlm_revoke_export_locks);
#endif
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);
EXPORT_SYMBOL(ldlm_refresh_waiting_lock);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
EXPORT_SYMBOL(ldlm_resource_get);
EXPORT_SYMBOL(ldlm_resource_putref);
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_import_add_conn);
EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_pack_pool_reply);

#ifdef HAVE_SERVER_SUPPORT
EXPORT_SYMBOL(server_disconnect_export);
EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
#endif

/* l_lock.c */
EXPORT_SYMBOL(lock_res_and_lock);
EXPORT_SYMBOL(unlock_res_and_lock);