1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002-2004 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of the Lustre file system, http://www.lustre.org
9 * Lustre is a trademark of Cluster File Systems, Inc.
11 * You may have signed or agreed to another license before downloading
12 * this software. If so, you are bound by the terms and conditions
13 * of that agreement, and the following does not apply to you. See the
14 * LICENSE file included with this distribution for more information.
16 * If you did not agree to a different license, then this copy of Lustre
17 * is open source software; you can redistribute it and/or modify it
18 * under the terms of version 2 of the GNU General Public License as
19 * published by the Free Software Foundation.
21 * In either case, Lustre is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * license text for more details.
28 # define EXPORT_SYMTAB
30 #define DEBUG_SUBSYSTEM S_LDLM
33 # include <libcfs/libcfs.h>
35 # include <liblustre.h>
38 #include <lustre_dlm.h>
39 #include <obd_class.h>
40 #include <libcfs/list.h>
41 #include "ldlm_internal.h"
43 extern cfs_mem_cache_t *ldlm_resource_slab;
44 extern cfs_mem_cache_t *ldlm_lock_slab;
45 extern struct list_head ldlm_namespace_list;
47 extern struct semaphore ldlm_namespace_lock;
48 static struct semaphore ldlm_ref_sem;
49 static int ldlm_refcount;
53 static struct ldlm_state *ldlm_state;
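/* Round a deadline up to the next whole second; this keeps the waiting-locks
 * timer from being re-armed for every individual lock timeout. */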
55 inline cfs_time_t round_timeout(cfs_time_t timeout)
57 return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
60 /* timeout for initial callback (AST) reply */
61 static inline unsigned int ldlm_get_rq_timeout(unsigned int ldlm_timeout,
62 unsigned int obd_timeout)
64 unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
66 return timeout < 1 ? 1 : timeout;
70 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
71 static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
72 static struct list_head waiting_locks_list;
73 static cfs_timer_t waiting_locks_timer;
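/* State shared with the dedicated thread that evicts clients whose
 * lock callbacks (ASTs) have timed out. */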
75 static struct expired_lock_thread {
76 cfs_waitq_t elt_waitq;
79 struct list_head elt_expired_locks;
80 } expired_lock_thread;
85 #define ELT_TERMINATE 2
89 struct list_head blp_list;
90 cfs_waitq_t blp_waitq;
91 atomic_t blp_num_threads;
92 struct completion blp_comp;
95 struct ldlm_bl_work_item {
96 struct list_head blwi_entry;
97 struct ldlm_namespace *blwi_ns;
98 struct ldlm_lock_desc blwi_ld;
99 struct ldlm_lock *blwi_lock;
105 static inline int have_expired_locks(void)
110 spin_lock_bh(&waiting_locks_spinlock);
111 need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
112 spin_unlock_bh(&waiting_locks_spinlock);
117 static int expired_lock_main(void *arg)
119 struct list_head *expired = &expired_lock_thread.elt_expired_locks;
120 struct l_wait_info lwi = { 0 };
124 cfs_daemonize("ldlm_elt");
126 expired_lock_thread.elt_state = ELT_READY;
127 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
130 l_wait_event(expired_lock_thread.elt_waitq,
131 have_expired_locks() ||
132 expired_lock_thread.elt_state == ELT_TERMINATE,
135 spin_lock_bh(&waiting_locks_spinlock);
136 if (expired_lock_thread.elt_dump) {
137 spin_unlock_bh(&waiting_locks_spinlock);
139 /* from waiting_locks_callback, but not in timer */
140 libcfs_debug_dumplog();
141 libcfs_run_lbug_upcall(__FILE__,
142 "waiting_locks_callback",
143 expired_lock_thread.elt_dump);
145 spin_lock_bh(&waiting_locks_spinlock);
146 expired_lock_thread.elt_dump = 0;
151 while (!list_empty(expired)) {
152 struct obd_export *export;
153 struct ldlm_lock *lock;
155 lock = list_entry(expired->next, struct ldlm_lock,
157 if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
158 (void *)lock >= LP_POISON) {
159 spin_unlock_bh(&waiting_locks_spinlock);
160 CERROR("free lock on elt list %p\n", lock);
163 list_del_init(&lock->l_pending_chain);
164 if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
165 (void *)lock->l_export >= LP_POISON) {
166 CERROR("lock with free export on elt list %p\n",
168 lock->l_export = NULL;
169 LDLM_ERROR(lock, "free export");
172 export = class_export_get(lock->l_export);
173 spin_unlock_bh(&waiting_locks_spinlock);
176 class_fail_export(export);
177 class_export_put(export);
178 spin_lock_bh(&waiting_locks_spinlock);
180 spin_unlock_bh(&waiting_locks_spinlock);
182 if (do_dump && obd_dump_on_eviction) {
183 CERROR("dump the log upon eviction\n");
184 libcfs_debug_dumplog();
187 if (expired_lock_thread.elt_state == ELT_TERMINATE)
191 expired_lock_thread.elt_state = ELT_STOPPED;
192 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
196 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
198 /* This is called from within a timer interrupt and cannot schedule */
199 static void waiting_locks_callback(unsigned long unused)
201 struct ldlm_lock *lock, *last = NULL;
204 spin_lock_bh(&waiting_locks_spinlock);
205 while (!list_empty(&waiting_locks_list)) {
206 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
209 if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
210 (lock->l_req_mode == LCK_GROUP))
213 if (ptlrpc_check_suspend()) {
214 /* there is a case where we hold a lock from one mds while
215 * talking to another mds; if that second mds is being
216 * recovered, we can easily get here, so we suspend
217 * timeouts. bug 6019 */
219 LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
220 lock->l_export->exp_client_uuid.uuid,
221 lock->l_export->exp_connection->c_remote_uuid.uuid,
222 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
224 list_del_init(&lock->l_pending_chain);
225 spin_unlock_bh(&waiting_locks_spinlock);
226 ldlm_add_waiting_lock(lock);
230 /* if the timeout overlaps the activation time of suspended timeouts
231 * then extend it to give the client a chance to reconnect */
232 if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
233 cfs_time_seconds(obd_timeout)/2),
234 ptlrpc_suspend_wakeup_time())) {
235 LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
236 lock->l_export->exp_client_uuid.uuid,
237 lock->l_export->exp_connection->c_remote_uuid.uuid,
238 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
240 list_del_init(&lock->l_pending_chain);
241 spin_unlock_bh(&waiting_locks_spinlock);
242 ldlm_add_waiting_lock(lock);
246 LDLM_ERROR(lock, "lock callback timer expired: evicting client "
248 lock->l_export->exp_client_uuid.uuid,
249 lock->l_export->exp_connection->c_remote_uuid.uuid,
250 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
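/* hand the lock over to the expired-lock thread, which will fail the export */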
254 list_del(&lock->l_pending_chain);
255 list_add(&lock->l_pending_chain,
256 &expired_lock_thread.elt_expired_locks);
259 if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
260 if (obd_dump_on_timeout)
261 expired_lock_thread.elt_dump = __LINE__;
263 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
267 * Make sure the timer will fire again if we have any locks
270 if (!list_empty(&waiting_locks_list)) {
271 cfs_time_t timeout_rounded;
272 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
274 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
275 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
277 spin_unlock_bh(&waiting_locks_spinlock);
281 * Indicate that we're waiting for a client to call us back cancelling a given
282 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
283 * timer to fire appropriately. (We round up to the next second, to avoid
284 * floods of timer firings during periods of high lock contention and traffic).
286 * Called with the namespace lock held.
288 static int __ldlm_add_waiting_lock(struct ldlm_lock *lock)
290 cfs_time_t timeout_rounded;
292 if (!list_empty(&lock->l_pending_chain))
295 lock->l_callback_timeout =cfs_time_add(cfs_time_current(),
296 cfs_time_seconds(obd_timeout)/2);
298 timeout_rounded = round_timeout(lock->l_callback_timeout);
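/* only (re)arm the timer if this deadline comes before the one it is
 * currently set for, or if it is not armed at all */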
300 if (cfs_time_before(timeout_rounded, cfs_timer_deadline(&waiting_locks_timer)) ||
301 !cfs_timer_is_armed(&waiting_locks_timer)) {
302 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
305 list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
309 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
313 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
315 spin_lock_bh(&waiting_locks_spinlock);
316 if (lock->l_destroyed) {
317 static cfs_time_t next;
318 spin_unlock_bh(&waiting_locks_spinlock);
319 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
320 if (cfs_time_after(cfs_time_current(), next)) {
321 next = cfs_time_shift(14400);
322 libcfs_debug_dumpstack(NULL);
327 ret = __ldlm_add_waiting_lock(lock);
328 spin_unlock_bh(&waiting_locks_spinlock);
330 LDLM_DEBUG(lock, "%sadding to wait list",
331 ret == 0 ? "not re-" : "");
336 * Remove a lock from the pending list, likely because it had its cancellation
337 * callback arrive without incident. This adjusts the lock-timeout timer if
338 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
340 * Called with namespace lock held.
342 int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
344 struct list_head *list_next;
346 if (list_empty(&lock->l_pending_chain))
349 list_next = lock->l_pending_chain.next;
350 if (lock->l_pending_chain.prev == &waiting_locks_list) {
351 /* Removing the head of the list, adjust timer. */
352 if (list_next == &waiting_locks_list) {
353 /* No more, just cancel. */
354 cfs_timer_disarm(&waiting_locks_timer);
356 struct ldlm_lock *next;
357 next = list_entry(list_next, struct ldlm_lock,
359 cfs_timer_arm(&waiting_locks_timer,
360 round_timeout(next->l_callback_timeout));
363 list_del_init(&lock->l_pending_chain);
368 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
372 if (lock->l_export == NULL) {
373 /* We don't have a "waiting locks list" on clients. */
374 LDLM_DEBUG(lock, "client lock: no-op");
378 spin_lock_bh(&waiting_locks_spinlock);
379 ret = __ldlm_del_waiting_lock(lock);
380 spin_unlock_bh(&waiting_locks_spinlock);
382 LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
389 * Called with namespace lock held.
391 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
393 if (lock->l_export == NULL) {
394 /* We don't have a "waiting locks list" on clients. */
395 LDLM_DEBUG(lock, "client lock: no-op");
399 spin_lock_bh(&waiting_locks_spinlock);
401 if (list_empty(&lock->l_pending_chain)) {
402 spin_unlock_bh(&waiting_locks_spinlock);
403 LDLM_DEBUG(lock, "wasn't waiting");
407 __ldlm_del_waiting_lock(lock);
408 __ldlm_add_waiting_lock(lock);
409 spin_unlock_bh(&waiting_locks_spinlock);
411 LDLM_DEBUG(lock, "refreshed");
415 #else /* !__KERNEL__ */
417 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
419 LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
423 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
428 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
432 #endif /* __KERNEL__ */
434 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
435 const char *ast_type)
437 struct ptlrpc_connection *conn = lock->l_export->exp_connection;
438 char *str = libcfs_nid2str(conn->c_peer.nid);
440 LCONSOLE_ERROR_MSG(0x138, "A client on nid %s was evicted from "
441 "service %s.\n", str,
442 lock->l_export->exp_obd->obd_name);
444 LCONSOLE_ERROR_MSG(0x012, "Lock %s callback to %s timed out for "
445 "resource %d\n", ast_type,
446 obd_export_nid2str(lock->l_export), rc);
448 if (obd_dump_on_timeout)
449 libcfs_debug_dumplog();
450 class_fail_export(lock->l_export);
453 static int ldlm_handle_ast_error(struct ldlm_lock *lock,
454 struct ptlrpc_request *req, int rc,
455 const char *ast_type)
457 lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
459 if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
460 LASSERT(lock->l_export);
461 if (lock->l_export->exp_libclient) {
462 LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
463 " timeout, just cancelling lock", ast_type,
464 libcfs_nid2str(peer.nid));
465 ldlm_lock_cancel(lock);
467 } else if (lock->l_flags & LDLM_FL_CANCEL) {
468 LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
469 "cancel was received (AST reply lost?)",
470 ast_type, libcfs_nid2str(peer.nid));
471 ldlm_lock_cancel(lock);
474 ldlm_del_waiting_lock(lock);
475 ldlm_failed_ast(lock, rc, ast_type);
479 LDLM_DEBUG(lock, "client (nid %s) returned %d"
480 " from %s AST - normal race",
481 libcfs_nid2str(peer.nid),
483 lustre_msg_get_status(req->rq_repmsg) : -1,
486 LDLM_ERROR(lock, "client (nid %s) returned %d "
487 "from %s AST", libcfs_nid2str(peer.nid),
488 (req->rq_repmsg != NULL) ?
489 lustre_msg_get_status(req->rq_repmsg) : 0,
491 ldlm_lock_cancel(lock);
492 /* Server-side AST functions are called from ldlm_reprocess_all,
493 * which needs to be told to please restart its reprocessing. */
501 * ->l_blocking_ast() method for server-side locks. This is invoked when a newly
502 * enqueued server lock conflicts with the given one.
504 * Sends a blocking AST RPC to the client owning that lock and arms a timeout
505 * timer to wait for the client's response.
507 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
508 struct ldlm_lock_desc *desc,
509 void *data, int flag)
511 struct ldlm_request *body;
512 struct ptlrpc_request *req;
513 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
514 [DLM_LOCKREQ_OFF] = sizeof(*body) };
515 int instant_cancel = 0, rc = 0;
518 if (flag == LDLM_CB_CANCELING) {
519 /* Don't need to do anything here. */
524 if (lock->l_export->exp_obd->obd_recovering != 0) {
525 LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
526 ldlm_lock_dump(D_ERROR, lock, 0);
529 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
530 LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK, 2, size,
535 lock_res(lock->l_resource);
536 if (lock->l_granted_mode != lock->l_req_mode) {
537 /* this blocking AST will be communicated as part of the
538 * completion AST instead */
539 unlock_res(lock->l_resource);
540 ptlrpc_req_finished(req);
541 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
545 if (lock->l_destroyed) {
546 /* What's the point? */
547 unlock_res(lock->l_resource);
548 ptlrpc_req_finished(req);
552 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
555 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
556 body->lock_handle[0] = lock->l_remote_handle;
557 body->lock_desc = *desc;
558 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
560 LDLM_DEBUG(lock, "server preparing blocking AST");
562 ptlrpc_req_set_repsize(req, 1, NULL);
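/* with CANCEL_ON_BLOCK the client will not send a cancel, so cancel the
 * lock locally now; otherwise start the callback timer and wait for it */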
563 if (instant_cancel) {
564 unlock_res(lock->l_resource);
565 ldlm_lock_cancel(lock);
567 LASSERT(lock->l_granted_mode == lock->l_req_mode);
568 ldlm_add_waiting_lock(lock);
569 unlock_res(lock->l_resource);
572 req->rq_send_state = LUSTRE_IMP_FULL;
573 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
575 if (lock->l_export && lock->l_export->exp_ldlm_stats)
576 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
577 LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
579 if (unlikely(instant_cancel)) {
580 rc = ptl_send_rpc(req, 1);
582 rc = ptlrpc_queue_wait(req);
583 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
586 /* If the client cancelled the lock but the cancel has not been
587 * received yet, we need to update the lvbo so the proper
588 * attributes are cached. */
590 ldlm_res_lvbo_update(lock->l_resource, NULL, 0, 1);
591 rc = ldlm_handle_ast_error(lock, req, rc, "blocking");
594 ptlrpc_req_finished(req);
596 /* If we cancelled the lock, we need to restart ldlm_reprocess_queue */
597 if (!rc && instant_cancel)
603 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
605 struct ldlm_request *body;
606 struct ptlrpc_request *req;
607 struct timeval granted_time;
608 long total_enqueue_wait;
609 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
610 [DLM_LOCKREQ_OFF] = sizeof(*body) };
611 int rc = 0, buffers = 2, instant_cancel = 0;
614 LASSERT(lock != NULL);
616 do_gettimeofday(&granted_time);
617 total_enqueue_wait = cfs_timeval_sub(&granted_time,
618 &lock->l_enqueued_time, NULL);
620 if (total_enqueue_wait / 1000000 > obd_timeout)
621 LDLM_ERROR(lock, "enqueue wait took %luus from %lu",
622 total_enqueue_wait, lock->l_enqueued_time.tv_sec);
624 lock_res_and_lock(lock);
625 if (lock->l_resource->lr_lvb_len) {
626 size[DLM_REQ_REC_OFF] = lock->l_resource->lr_lvb_len;
629 unlock_res_and_lock(lock);
631 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
632 LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK, buffers,
637 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
638 body->lock_handle[0] = lock->l_remote_handle;
639 body->lock_flags = flags;
640 ldlm_lock2desc(lock, &body->lock_desc);
645 lvb = lustre_msg_buf(req->rq_reqmsg, DLM_REQ_REC_OFF,
646 lock->l_resource->lr_lvb_len);
647 lock_res_and_lock(lock);
648 memcpy(lvb, lock->l_resource->lr_lvb_data,
649 lock->l_resource->lr_lvb_len);
650 unlock_res_and_lock(lock);
653 LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
656 ptlrpc_req_set_repsize(req, 1, NULL);
658 req->rq_send_state = LUSTRE_IMP_FULL;
659 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
661 /* We only send real blocking ASTs after the lock is granted */
662 lock_res_and_lock(lock);
663 if (lock->l_flags & LDLM_FL_AST_SENT) {
664 body->lock_flags |= LDLM_FL_AST_SENT;
666 /* We might get here before ldlm_handle_enqueue has set the
667 * LDLM_FL_CANCEL_ON_BLOCK flag. In that case we put this lock
668 * on the waiting list, but this is safe: similar code in
669 * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
670 * which not only cancels the lock but also removes it
671 * from the waiting list */
672 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
673 unlock_res_and_lock(lock);
674 ldlm_lock_cancel(lock);
676 lock_res_and_lock(lock);
678 /* start the lock-timeout clock */
679 ldlm_add_waiting_lock(lock);
682 unlock_res_and_lock(lock);
684 if (lock->l_export && lock->l_export->exp_ldlm_stats)
685 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
686 LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
688 rc = ptlrpc_queue_wait(req);
690 rc = ldlm_handle_ast_error(lock, req, rc, "completion");
692 ptlrpc_req_finished(req);
694 /* If we cancelled the lock, we need to restart ldlm_reprocess_queue */
695 if (!rc && instant_cancel)
701 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
703 struct ldlm_resource *res = lock->l_resource;
704 struct ldlm_request *body;
705 struct ptlrpc_request *req;
706 int size[] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
707 [DLM_LOCKREQ_OFF] = sizeof(*body) };
711 LASSERT(lock != NULL);
713 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
714 LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK, 2, size,
719 body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
720 body->lock_handle[0] = lock->l_remote_handle;
721 ldlm_lock2desc(lock, &body->lock_desc);
723 lock_res_and_lock(lock);
724 size[REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
725 unlock_res_and_lock(lock);
726 res = lock->l_resource;
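/* reserve reply space for the LVB the client returns with the glimpse */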
727 ptlrpc_req_set_repsize(req, 2, size);
729 req->rq_send_state = LUSTRE_IMP_FULL;
730 req->rq_timeout = ldlm_get_rq_timeout(ldlm_timeout, obd_timeout);
732 if (lock->l_export && lock->l_export->exp_ldlm_stats)
733 lprocfs_counter_incr(lock->l_export->exp_ldlm_stats,
734 LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
736 rc = ptlrpc_queue_wait(req);
737 if (rc == -ELDLM_NO_LOCK_DATA)
738 LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
740 rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
742 rc = ldlm_res_lvbo_update(res, req->rq_repmsg,
744 ptlrpc_req_finished(req);
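/* Search the export's list of granted locks for one whose remote handle
 * matches remote_hdl; used when a client replays an enqueue. */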
748 static struct ldlm_lock *
749 find_existing_lock(struct obd_export *exp,
750 const struct lustre_handle *remote_hdl)
752 struct list_head *iter;
754 spin_lock(&exp->exp_ldlm_data.led_lock);
755 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
756 struct ldlm_lock *lock;
757 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
758 if (lock->l_remote_handle.cookie == remote_hdl->cookie) {
760 spin_unlock(&exp->exp_ldlm_data.led_lock);
764 spin_unlock(&exp->exp_ldlm_data.led_lock);
769 extern unsigned long long lu_time_stamp_get(void);
771 #define lu_time_stamp_get() time(NULL)
775 * Main server-side entry point into LDLM. This is called by ptlrpc service
776 * threads to carry out client lock enqueueing requests.
778 int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
779 struct ptlrpc_request *req,
780 const struct ldlm_request *dlm_req,
781 const struct ldlm_callback_suite *cbs)
783 struct ldlm_reply *dlm_rep;
784 int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
785 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
788 ldlm_error_t err = ELDLM_OK;
789 struct ldlm_lock *lock = NULL;
793 LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
795 ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
796 flags = dlm_req->lock_flags;
798 LASSERT(req->rq_export);
800 if (req->rq_export->exp_ldlm_stats)
801 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
802 LDLM_ENQUEUE - LDLM_FIRST_OPC);
804 if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
805 dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
806 DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
807 dlm_req->lock_desc.l_resource.lr_type);
808 GOTO(out, rc = -EFAULT);
811 if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
812 dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
813 dlm_req->lock_desc.l_req_mode &
814 (dlm_req->lock_desc.l_req_mode-1))) {
815 DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
816 dlm_req->lock_desc.l_req_mode);
817 GOTO(out, rc = -EFAULT);
820 if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
821 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
823 DEBUG_REQ(D_ERROR, req,
824 "PLAIN lock request from IBITS client?");
825 GOTO(out, rc = -EPROTO);
827 } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
829 DEBUG_REQ(D_ERROR, req,
830 "IBITS lock request from unaware client?");
831 GOTO(out, rc = -EPROTO);
835 /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
836 against server's _CONNECT_SUPPORTED flags? (I don't want to use
837 ibits for mgc/mgs) */
839 /* INODEBITS_INTEROP: Perform conversion from plain lock to
840 * inodebits lock if client does not support them. */
841 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
842 (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
843 dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
844 dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
845 MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
846 if (dlm_req->lock_desc.l_req_mode == LCK_PR)
847 dlm_req->lock_desc.l_req_mode = LCK_CR;
851 if (unlikely(flags & LDLM_FL_REPLAY)) {
852 lock = find_existing_lock(req->rq_export,
853 &dlm_req->lock_handle[0]);
855 DEBUG_REQ(D_HA, req, "found existing lock cookie "LPX64,
856 lock->l_handle.h_cookie);
857 GOTO(existing_lock, rc = 0);
861 /* The lock's callback data might be set in the policy function */
862 lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
863 dlm_req->lock_desc.l_resource.lr_type,
864 dlm_req->lock_desc.l_req_mode,
865 cbs->lcs_blocking, cbs->lcs_completion,
866 cbs->lcs_glimpse, NULL, 0);
869 GOTO(out, rc = -ENOMEM);
871 do_gettimeofday(&lock->l_enqueued_time);
872 lock->l_remote_handle = dlm_req->lock_handle[0];
873 LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
875 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
876 /* Don't enqueue a lock onto the export if it has already
877 * been evicted. Cancel it now instead. (bug 3822) */
878 if (req->rq_export->exp_failed) {
879 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
880 GOTO(out, rc = -ENOTCONN);
882 lock->l_export = class_export_get(req->rq_export);
883 spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
884 list_add(&lock->l_export_chain,
885 &lock->l_export->exp_ldlm_data.led_held_locks);
886 spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
890 if (flags & LDLM_FL_HAS_INTENT) {
891 /* In this case, the reply buffer is allocated deep in
892 * local_lock_enqueue by the policy function. */
897 lock_res_and_lock(lock);
898 if (lock->l_resource->lr_lvb_len) {
899 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
902 unlock_res_and_lock(lock);
904 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
905 GOTO(out, rc = -ENOMEM);
907 rc = lustre_pack_reply(req, buffers, size, NULL);
912 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
913 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
914 if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
915 lock->l_req_extent = lock->l_policy_data.l_extent;
917 err = ldlm_lock_enqueue(ns, &lock, cookie, &flags);
921 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
923 dlm_rep->lock_flags = flags;
925 ldlm_lock2desc(lock, &dlm_rep->lock_desc);
926 ldlm_lock2handle(lock, &dlm_rep->lock_handle);
928 /* We never send a blocking AST until the lock is granted, but
929 * we can tell the client about it right now */
930 lock_res_and_lock(lock);
932 /* Now take into account the flags to be inherited from the original lock
933 request, both in the reply to the client and in our own lock flags. */
934 dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
935 lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
937 /* Don't move a pending lock onto the export if it has already
938 * been evicted. Cancel it now instead. (bug 5683) */
939 if (unlikely(req->rq_export->exp_failed ||
940 OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
941 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
943 } else if (lock->l_flags & LDLM_FL_AST_SENT) {
944 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
945 if (lock->l_granted_mode == lock->l_req_mode) {
947 * Only cancel the lock if it was granted; otherwise it would
948 * be destroyed immediately and never granted in the future,
949 * causing timeouts on the client. A lock that was not
950 * granted will be cancelled immediately after sending the
951 * completion AST.
953 if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
954 unlock_res_and_lock(lock);
955 ldlm_lock_cancel(lock);
956 lock_res_and_lock(lock);
958 ldlm_add_waiting_lock(lock);
961 /* Make sure we never ever grant usual metadata locks to liblustre
963 if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
964 dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
965 req->rq_export->exp_libclient) {
966 if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
967 !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
968 CERROR("Granting sync lock to libclient. "
969 "req fl %d, rep fl %d, lock fl %d\n",
970 dlm_req->lock_flags, dlm_rep->lock_flags,
972 LDLM_ERROR(lock, "sync lock");
973 if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
974 struct ldlm_intent *it;
975 it = lustre_msg_buf(req->rq_reqmsg,
979 CERROR("This is intent %s ("LPU64")\n",
980 ldlm_it2str(it->opc), it->opc);
986 unlock_res_and_lock(lock);
990 req->rq_status = err;
991 if (req->rq_reply_state == NULL) {
992 err = lustre_pack_reply(req, 1, NULL, NULL);
998 /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
999 * ldlm_reprocess_all. If this moves, revisit that code. -phil */
1001 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
1002 "(err=%d, rc=%d)", err, rc);
1004 lock_res_and_lock(lock);
1006 size[DLM_REPLY_REC_OFF] = lock->l_resource->lr_lvb_len;
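/* copy the current LVB into the reply so the client caches up-to-date
 * attributes along with the lock */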
1007 if (size[DLM_REPLY_REC_OFF] > 0) {
1008 void *lvb = lustre_msg_buf(req->rq_repmsg,
1010 size[DLM_REPLY_REC_OFF]);
1011 LASSERTF(lvb != NULL, "req %p, lock %p\n",
1014 memcpy(lvb, lock->l_resource->lr_lvb_data,
1015 size[DLM_REPLY_REC_OFF]);
1018 ldlm_resource_unlink_lock(lock);
1019 ldlm_lock_destroy_nolock(lock);
1021 unlock_res_and_lock(lock);
1023 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
1024 ldlm_reprocess_all(lock->l_resource);
1026 LDLM_LOCK_PUT(lock);
1029 LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
1035 int ldlm_handle_enqueue(struct ptlrpc_request *req,
1036 ldlm_completion_callback completion_callback,
1037 ldlm_blocking_callback blocking_callback,
1038 ldlm_glimpse_callback glimpse_callback)
1041 struct ldlm_request *dlm_req;
1042 struct ldlm_callback_suite cbs = {
1043 .lcs_completion = completion_callback,
1044 .lcs_blocking = blocking_callback,
1045 .lcs_glimpse = glimpse_callback
1049 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1050 sizeof *dlm_req, lustre_swab_ldlm_request);
1051 if (dlm_req != NULL) {
1052 rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
1053 req, dlm_req, &cbs);
1055 CERROR ("Can't unpack dlm_req\n");
1061 int ldlm_handle_convert0(struct ptlrpc_request *req,
1062 const struct ldlm_request *dlm_req)
1064 struct ldlm_reply *dlm_rep;
1065 struct ldlm_lock *lock;
1067 int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1068 [DLM_LOCKREPLY_OFF] = sizeof(*dlm_rep) };
1071 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1072 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1073 LDLM_CONVERT - LDLM_FIRST_OPC);
1075 rc = lustre_pack_reply(req, 2, size, NULL);
1077 CERROR("out of memory\n");
1080 dlm_rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
1082 dlm_rep->lock_flags = dlm_req->lock_flags;
1084 lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
1086 req->rq_status = EINVAL;
1090 LDLM_DEBUG(lock, "server-side convert handler START");
1092 do_gettimeofday(&lock->l_enqueued_time);
1093 res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
1094 &dlm_rep->lock_flags);
1096 if (ldlm_del_waiting_lock(lock))
1097 LDLM_DEBUG(lock, "converted waiting lock");
1100 req->rq_status = EDEADLOCK;
1105 if (!req->rq_status)
1106 ldlm_reprocess_all(lock->l_resource);
1107 LDLM_DEBUG(lock, "server-side convert handler END");
1108 LDLM_LOCK_PUT(lock);
1110 LDLM_DEBUG_NOLOCK("server-side convert handler END");
1115 int ldlm_handle_convert(struct ptlrpc_request *req)
1118 struct ldlm_request *dlm_req;
1120 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof *dlm_req,
1121 lustre_swab_ldlm_request);
1122 if (dlm_req != NULL) {
1123 rc = ldlm_handle_convert0(req, dlm_req);
1125 CERROR ("Can't unpack dlm_req\n");
1131 /* Cancel all the locks whose handles are packed into the ldlm_request */
1132 int ldlm_request_cancel(struct ptlrpc_request *req,
1133 const struct ldlm_request *dlm_req, int first)
1135 struct ldlm_resource *res, *pres = NULL;
1136 struct ldlm_lock *lock;
1137 int i, count, done = 0;
1140 LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
1141 "starting at %d", dlm_req->lock_count, first);
1142 count = dlm_req->lock_count ? dlm_req->lock_count : 1;
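/* a zero lock_count means a single handle is packed in the request */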
1146 /* There are no locks on the server at replay time, so
1147 * skip lock cancelling to let the replay tests pass. */
1148 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1151 for (i = first; i < count; i++) {
1152 lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
1154 LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
1155 "lock (cookie "LPU64")",
1156 dlm_req->lock_handle[i].cookie);
1160 res = lock->l_resource;
1165 ldlm_reprocess_all(pres);
1166 ldlm_resource_putref(pres);
1169 ldlm_resource_getref(res);
1170 ldlm_res_lvbo_update(res, NULL, 0, 1);
1174 ldlm_lock_cancel(lock);
1175 LDLM_LOCK_PUT(lock);
1178 ldlm_reprocess_all(pres);
1179 ldlm_resource_putref(pres);
1181 LDLM_DEBUG_NOLOCK("server-side cancel handler END");
1185 int ldlm_handle_cancel(struct ptlrpc_request *req)
1187 struct ldlm_request *dlm_req;
1191 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1192 lustre_swab_ldlm_request);
1193 if (dlm_req == NULL) {
1194 CERROR("bad request buffer for cancel\n");
1198 if (req->rq_export && req->rq_export->exp_ldlm_stats)
1199 lprocfs_counter_incr(req->rq_export->exp_ldlm_stats,
1200 LDLM_CANCEL - LDLM_FIRST_OPC);
1202 rc = lustre_pack_reply(req, 1, NULL, NULL);
1204 CERROR("out of memory\n");
1208 if (!ldlm_request_cancel(req, dlm_req, 0))
1209 req->rq_status = ESTALE;
1211 if (ptlrpc_reply(req) != 0)
1217 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
1218 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
1223 LDLM_DEBUG(lock, "client blocking AST callback handler START");
1225 lock_res_and_lock(lock);
1226 lock->l_flags |= LDLM_FL_CBPENDING;
1228 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
1229 lock->l_flags |= LDLM_FL_CANCEL;
1231 do_ast = (!lock->l_readers && !lock->l_writers);
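/* run the blocking AST right away only if the lock is no longer in use
 * locally; otherwise it will be cancelled when the last reference drops */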
1232 unlock_res_and_lock(lock);
1235 LDLM_DEBUG(lock, "already unused, calling "
1236 "callback (%p)", lock->l_blocking_ast);
1237 if (lock->l_blocking_ast != NULL)
1238 lock->l_blocking_ast(lock, ld, lock->l_ast_data,
1241 LDLM_DEBUG(lock, "Lock still has references, will be"
1242 " cancelled later");
1245 LDLM_DEBUG(lock, "client blocking callback handler END");
1246 LDLM_LOCK_PUT(lock);
1250 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
1251 struct ldlm_namespace *ns,
1252 struct ldlm_request *dlm_req,
1253 struct ldlm_lock *lock)
1255 CFS_LIST_HEAD(ast_list);
1258 LDLM_DEBUG(lock, "client completion callback handler START");
1260 lock_res_and_lock(lock);
1262 /* If we receive the completion AST before the actual enqueue returned,
1263 * then we might need to switch lock modes, resources, or extents. */
1264 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
1265 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
1266 LDLM_DEBUG(lock, "completion AST, new lock mode");
1269 if (lock->l_resource->lr_type != LDLM_PLAIN) {
1270 lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
1271 LDLM_DEBUG(lock, "completion AST, new policy data");
1274 ldlm_resource_unlink_lock(lock);
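/* if the server granted the lock on a different resource, move it there */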
1275 if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
1276 &lock->l_resource->lr_name,
1277 sizeof(lock->l_resource->lr_name)) != 0) {
1278 unlock_res_and_lock(lock);
1279 ldlm_lock_change_resource(ns, lock,
1280 &dlm_req->lock_desc.l_resource.lr_name);
1281 LDLM_DEBUG(lock, "completion AST, new resource");
1282 CERROR("change resource!\n");
1283 lock_res_and_lock(lock);
1286 if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
1287 lock->l_flags |= LDLM_FL_CBPENDING;
1288 LDLM_DEBUG(lock, "completion AST includes blocking AST");
1291 if (lock->l_lvb_len) {
1293 lvb = lustre_swab_reqbuf(req, DLM_REQ_REC_OFF, lock->l_lvb_len,
1294 lock->l_lvb_swabber);
1296 LDLM_ERROR(lock, "completion AST did not contain "
1299 memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
1303 ldlm_grant_lock(lock, &ast_list);
1304 unlock_res_and_lock(lock);
1306 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
1308 ldlm_run_cp_ast_work(&ast_list);
1310 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
1312 LDLM_LOCK_PUT(lock);
1316 static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
1317 struct ldlm_namespace *ns,
1318 struct ldlm_request *dlm_req,
1319 struct ldlm_lock *lock)
1324 LDLM_DEBUG(lock, "client glimpse AST callback handler");
1326 if (lock->l_glimpse_ast != NULL)
1327 rc = lock->l_glimpse_ast(lock, req);
1329 if (req->rq_repmsg != NULL) {
1332 req->rq_status = rc;
1336 lock_res_and_lock(lock);
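/* if this PW lock has sat unused for a while, cancel it instead of
 * keeping it around just to be glimpsed again */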
1337 if (lock->l_granted_mode == LCK_PW &&
1338 !lock->l_readers && !lock->l_writers &&
1339 cfs_time_after(cfs_time_current(),
1340 cfs_time_add(lock->l_last_used,
1341 cfs_time_seconds(10)))) {
1342 unlock_res_and_lock(lock);
1343 if (ldlm_bl_to_thread(ns, NULL, lock, 0))
1344 ldlm_handle_bl_callback(ns, NULL, lock);
1349 unlock_res_and_lock(lock);
1350 LDLM_LOCK_PUT(lock);
1354 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
1356 req->rq_status = rc;
1357 if (req->rq_reply_state == NULL) {
1358 rc = lustre_pack_reply(req, 1, NULL, NULL);
1362 return ptlrpc_reply(req);
1365 int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
1366 struct ldlm_lock *lock, int flags)
1369 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1370 struct ldlm_bl_work_item *blwi;
1373 OBD_ALLOC(blwi, sizeof(*blwi));
1379 blwi->blwi_ld = *ld;
1380 blwi->blwi_lock = lock;
1381 blwi->blwi_flags = flags;
1383 spin_lock(&blp->blp_lock);
1384 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
1385 cfs_waitq_signal(&blp->blp_waitq);
1386 spin_unlock(&blp->blp_lock);
1394 static int ldlm_callback_handler(struct ptlrpc_request *req)
1396 struct ldlm_namespace *ns;
1397 struct ldlm_request *dlm_req;
1398 struct ldlm_lock *lock;
1402 /* Requests arrive in sender's byte order. The ptlrpc service
1403 * handler has already checked and, if necessary, byte-swapped the
1404 * incoming request message body, but I am responsible for the
1405 * message buffers. */
1407 if (req->rq_export == NULL) {
1408 struct ldlm_request *dlm_req;
1410 CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
1411 "export cookie "LPX64"; this is "
1412 "normal if this node rebooted with a lock held\n",
1413 lustre_msg_get_opc(req->rq_reqmsg),
1414 libcfs_id2str(req->rq_peer),
1415 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1417 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1419 lustre_swab_ldlm_request);
1420 if (dlm_req != NULL)
1421 CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
1422 dlm_req->lock_handle[0].cookie);
1424 ldlm_callback_reply(req, -ENOTCONN);
1428 LASSERT(req->rq_export != NULL);
1429 LASSERT(req->rq_export->exp_obd != NULL);
1431 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1432 case LDLM_BL_CALLBACK:
1433 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1435 case LDLM_CP_CALLBACK:
1436 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
1438 case LDLM_GL_CALLBACK:
1439 OBD_FAIL_RETURN(OBD_FAIL_LDLM_GL_CALLBACK, 0);
1441 case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
1442 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1443 rc = llog_origin_handle_cancel(req);
1444 ldlm_callback_reply(req, rc);
1446 case OBD_QC_CALLBACK:
1447 OBD_FAIL_RETURN(OBD_FAIL_OBD_QC_CALLBACK_NET, 0);
1448 rc = target_handle_qc_callback(req);
1449 ldlm_callback_reply(req, rc);
1453 /* reply in handler */
1454 rc = target_handle_dqacq_callback(req);
1456 case LLOG_ORIGIN_HANDLE_CREATE:
1457 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1458 rc = llog_origin_handle_create(req);
1459 ldlm_callback_reply(req, rc);
1461 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1462 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1463 rc = llog_origin_handle_next_block(req);
1464 ldlm_callback_reply(req, rc);
1466 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1467 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1468 rc = llog_origin_handle_read_header(req);
1469 ldlm_callback_reply(req, rc);
1471 case LLOG_ORIGIN_HANDLE_CLOSE:
1472 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1473 rc = llog_origin_handle_close(req);
1474 ldlm_callback_reply(req, rc);
1477 CERROR("unknown opcode %u\n",
1478 lustre_msg_get_opc(req->rq_reqmsg));
1479 ldlm_callback_reply(req, -EPROTO);
1483 ns = req->rq_export->exp_obd->obd_namespace;
1484 LASSERT(ns != NULL);
1486 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF, sizeof(*dlm_req),
1487 lustre_swab_ldlm_request);
1488 if (dlm_req == NULL) {
1489 CERROR ("can't unpack dlm_req\n");
1490 ldlm_callback_reply(req, -EPROTO);
1494 lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle[0]);
1496 CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
1497 "disappeared\n", dlm_req->lock_handle[0].cookie);
1498 ldlm_callback_reply(req, -EINVAL);
1502 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
1503 lock_res_and_lock(lock);
1504 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
1505 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
1506 /* If somebody cancels locks and the cache is already dropped,
1507 * we can tell the server we have no lock. Otherwise, we
1508 * should send the cancel after dropping the cache. */
1509 if ((lock->l_flags & LDLM_FL_CANCELING) &&
1510 (lock->l_flags & LDLM_FL_BL_DONE)) {
1511 LDLM_DEBUG(lock, "callback on lock "
1512 LPX64" - lock disappeared\n",
1513 dlm_req->lock_handle[0].cookie);
1514 unlock_res_and_lock(lock);
1515 LDLM_LOCK_PUT(lock);
1516 ldlm_callback_reply(req, -EINVAL);
1519 lock->l_flags |= LDLM_FL_BL_AST;
1521 unlock_res_and_lock(lock);
1523 /* We want the ost thread to get this reply so that it can respond
1524 * to ost requests (write cache writeback) that might be triggered
1527 * But we'd also like to be able to indicate in the reply that we're
1528 * cancelling right now, because it's unused, or have an intent result
1529 * in the reply, so we might have to push the responsibility for sending
1530 * the reply down into the AST handlers, alas. */
1532 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1533 case LDLM_BL_CALLBACK:
1534 CDEBUG(D_INODE, "blocking ast\n");
1535 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK))
1536 ldlm_callback_reply(req, 0);
1537 if (ldlm_bl_to_thread(ns, &dlm_req->lock_desc, lock, 0))
1538 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
1540 case LDLM_CP_CALLBACK:
1541 CDEBUG(D_INODE, "completion ast\n");
1542 ldlm_callback_reply(req, 0);
1543 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
1545 case LDLM_GL_CALLBACK:
1546 CDEBUG(D_INODE, "glimpse ast\n");
1547 ldlm_handle_gl_callback(req, ns, dlm_req, lock);
1550 LBUG(); /* checked above */
1556 static int ldlm_cancel_handler(struct ptlrpc_request *req)
1561 /* Requests arrive in sender's byte order. The ptlrpc service
1562 * handler has already checked and, if necessary, byte-swapped the
1563 * incoming request message body, but I am responsible for the
1564 * message buffers. */
1566 if (req->rq_export == NULL) {
1567 struct ldlm_request *dlm_req;
1569 CERROR("operation %d from %s with bad export cookie "LPU64"\n",
1570 lustre_msg_get_opc(req->rq_reqmsg),
1571 libcfs_id2str(req->rq_peer),
1572 lustre_msg_get_handle(req->rq_reqmsg)->cookie);
1574 dlm_req = lustre_swab_reqbuf(req, DLM_LOCKREQ_OFF,
1576 lustre_swab_ldlm_request);
1577 if (dlm_req != NULL)
1578 ldlm_lock_dump_handle(D_ERROR,
1579 &dlm_req->lock_handle[0]);
1580 ldlm_callback_reply(req, -ENOTCONN);
1584 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1586 /* XXX FIXME move this back to mds/handler.c, bug 249 */
1588 CDEBUG(D_INODE, "cancel\n");
1589 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
1590 rc = ldlm_handle_cancel(req);
1594 case OBD_LOG_CANCEL:
1595 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1596 rc = llog_origin_handle_cancel(req);
1597 ldlm_callback_reply(req, rc);
1600 CERROR("invalid opcode %d\n",
1601 lustre_msg_get_opc(req->rq_reqmsg));
1602 ldlm_callback_reply(req, -EINVAL);
1608 void ldlm_revoke_export_locks(struct obd_export *exp)
1610 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
1611 struct list_head rpc_list;
1612 struct ldlm_lock *lock, *next;
1613 struct ldlm_lock_desc desc;
1616 INIT_LIST_HEAD(&rpc_list);
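/* collect granted PLAIN/IBITS locks that have not yet had a blocking AST
 * sent, then issue the ASTs outside the export lock */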
1618 spin_lock(&exp->exp_ldlm_data.led_lock);
1619 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
1620 lock_res_and_lock(lock);
1622 if (lock->l_req_mode != lock->l_granted_mode) {
1623 unlock_res_and_lock(lock);
1627 LASSERT(lock->l_resource);
1628 if (lock->l_resource->lr_type != LDLM_IBITS &&
1629 lock->l_resource->lr_type != LDLM_PLAIN) {
1630 unlock_res_and_lock(lock);
1634 if (lock->l_flags & LDLM_FL_AST_SENT) {
1635 unlock_res_and_lock(lock);
1639 LASSERT(lock->l_blocking_ast);
1640 LASSERT(!lock->l_blocking_lock);
1642 lock->l_flags |= LDLM_FL_AST_SENT;
1643 list_move(&lock->l_export_chain, &rpc_list);
1645 unlock_res_and_lock(lock);
1647 spin_unlock(&exp->exp_ldlm_data.led_lock);
1649 while (!list_empty(&rpc_list)) {
1650 lock = list_entry(rpc_list.next, struct ldlm_lock,
1652 list_del_init(&lock->l_export_chain);
1654 /* the desc just pretends to be exclusive */
1655 ldlm_lock2desc(lock, &desc);
1656 desc.l_req_mode = LCK_EX;
1657 desc.l_granted_mode = 0;
1659 LDLM_LOCK_GET(lock);
1660 lock->l_blocking_ast(lock, &desc, lock->l_ast_data,
1662 LDLM_LOCK_PUT(lock);
1668 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
1670 struct ldlm_bl_work_item *blwi = NULL;
1672 spin_lock(&blp->blp_lock);
1673 if (!list_empty(&blp->blp_list)) {
1674 blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
1676 list_del(&blwi->blwi_entry);
1678 spin_unlock(&blp->blp_lock);
1683 struct ldlm_bl_thread_data {
1685 struct ldlm_bl_pool *bltd_blp;
1688 static int ldlm_bl_thread_main(void *arg)
1690 struct ldlm_bl_thread_data *bltd = arg;
1691 struct ldlm_bl_pool *blp = bltd->bltd_blp;
1695 char name[CFS_CURPROC_COMM_MAX];
1696 snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
1698 cfs_daemonize(name);
1701 atomic_inc(&blp->blp_num_threads);
1702 complete(&blp->blp_comp);
1705 struct l_wait_info lwi = { 0 };
1706 struct ldlm_bl_work_item *blwi = NULL;
1708 l_wait_event_exclusive(blp->blp_waitq,
1709 (blwi = ldlm_bl_get_work(blp)) != NULL,
1712 if (blwi->blwi_ns == NULL)
1715 if (blwi->blwi_flags == LDLM_FL_CANCELING) {
1716 /* This is the special case when we cancel lru locks
1717 * asynchronously: the lock was already removed from
1718 * l_bl_ast explicitly in ldlm_cancel_lru before being
1719 * sent to this thread, so it is marked
1720 * LDLM_FL_CANCELING and already cancelled locally. */
1721 CFS_LIST_HEAD(head);
1722 LASSERT(list_empty(&blwi->blwi_lock->l_bl_ast));
1723 list_add(&blwi->blwi_lock->l_bl_ast, &head);
1724 ldlm_cli_cancel_req(blwi->blwi_lock->l_conn_export,
1726 LDLM_LOCK_PUT(blwi->blwi_lock);
1728 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
1731 OBD_FREE(blwi, sizeof(*blwi));
1734 atomic_dec(&blp->blp_num_threads);
1735 complete(&blp->blp_comp);
1741 static int ldlm_setup(void);
1742 static int ldlm_cleanup(int force);
1744 int ldlm_get_ref(void)
1748 mutex_down(&ldlm_ref_sem);
1749 if (++ldlm_refcount == 1) {
1754 mutex_up(&ldlm_ref_sem);
1759 void ldlm_put_ref(int force)
1762 mutex_down(&ldlm_ref_sem);
1763 if (ldlm_refcount == 1) {
1764 int rc = ldlm_cleanup(force);
1766 CERROR("ldlm_cleanup failed: %d\n", rc);
1772 mutex_up(&ldlm_ref_sem);
1777 static int ldlm_setup(void)
1779 struct ldlm_bl_pool *blp;
1786 if (ldlm_state != NULL)
1789 OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
1790 if (ldlm_state == NULL)
1794 rc = ldlm_proc_setup();
1799 ldlm_state->ldlm_cb_service =
1800 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1801 LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
1802 LDLM_CB_REPLY_PORTAL, ldlm_timeout * 900,
1803 ldlm_callback_handler, "ldlm_cbd",
1804 ldlm_svc_proc_dir, NULL,
1805 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1807 LCT_MD_THREAD|LCT_DT_THREAD);
1809 if (!ldlm_state->ldlm_cb_service) {
1810 CERROR("failed to start service\n");
1811 GOTO(out_proc, rc = -ENOMEM);
1814 ldlm_state->ldlm_cancel_service =
1815 ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
1816 LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
1817 LDLM_CANCEL_REPLY_PORTAL, ldlm_timeout * 6000,
1818 ldlm_cancel_handler, "ldlm_canceld",
1819 ldlm_svc_proc_dir, NULL,
1820 LDLM_THREADS_AUTO_MIN, LDLM_THREADS_AUTO_MAX,
1822 LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD);
1824 if (!ldlm_state->ldlm_cancel_service) {
1825 CERROR("failed to start service\n");
1826 GOTO(out_proc, rc = -ENOMEM);
1829 OBD_ALLOC(blp, sizeof(*blp));
1831 GOTO(out_proc, rc = -ENOMEM);
1832 ldlm_state->ldlm_bl_pool = blp;
1834 atomic_set(&blp->blp_num_threads, 0);
1835 cfs_waitq_init(&blp->blp_waitq);
1836 spin_lock_init(&blp->blp_lock);
1838 CFS_INIT_LIST_HEAD(&blp->blp_list);
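/* start the blocking-AST handler threads, waiting for each one to finish
 * initialising before launching the next */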
1841 for (i = 0; i < LDLM_BL_THREADS; i++) {
1842 struct ldlm_bl_thread_data bltd = {
1846 init_completion(&blp->blp_comp);
1847 rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
1849 CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
1850 GOTO(out_thread, rc);
1852 wait_for_completion(&blp->blp_comp);
1855 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cancel_service);
1857 GOTO(out_thread, rc);
1859 rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cb_service);
1861 GOTO(out_thread, rc);
1863 CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
1864 expired_lock_thread.elt_state = ELT_STOPPED;
1865 cfs_waitq_init(&expired_lock_thread.elt_waitq);
1867 CFS_INIT_LIST_HEAD(&waiting_locks_list);
1868 spin_lock_init(&waiting_locks_spinlock);
1869 cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
1871 rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
1873 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
1874 GOTO(out_thread, rc);
1877 wait_event(expired_lock_thread.elt_waitq,
1878 expired_lock_thread.elt_state == ELT_READY);
1885 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1886 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1891 ldlm_proc_cleanup();
1894 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
1899 static int ldlm_cleanup(int force)
1902 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
1906 if (!list_empty(&ldlm_namespace_list)) {
1907 CERROR("ldlm still has namespaces; clean these up first.\n");
1908 ldlm_dump_all_namespaces(D_DLMTRACE);
1913 while (atomic_read(&blp->blp_num_threads) > 0) {
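/* a work item with blwi_ns == NULL tells a blocking thread to exit */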
1914 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
1916 init_completion(&blp->blp_comp);
1918 spin_lock(&blp->blp_lock);
1919 list_add_tail(&blwi.blwi_entry, &blp->blp_list);
1920 cfs_waitq_signal(&blp->blp_waitq);
1921 spin_unlock(&blp->blp_lock);
1923 wait_for_completion(&blp->blp_comp);
1925 OBD_FREE(blp, sizeof(*blp));
1927 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1928 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1929 ldlm_proc_cleanup();
1931 expired_lock_thread.elt_state = ELT_TERMINATE;
1932 cfs_waitq_signal(&expired_lock_thread.elt_waitq);
1933 wait_event(expired_lock_thread.elt_waitq,
1934 expired_lock_thread.elt_state == ELT_STOPPED);
1936 ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
1937 ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
1940 OBD_FREE(ldlm_state, sizeof(*ldlm_state));
1946 int __init ldlm_init(void)
1948 init_mutex(&ldlm_ref_sem);
1949 init_mutex(&ldlm_namespace_lock);
1950 ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
1951 sizeof(struct ldlm_resource), 0,
1952 SLAB_HWCACHE_ALIGN);
1953 if (ldlm_resource_slab == NULL)
1956 ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
1957 sizeof(struct ldlm_lock), 0,
1958 SLAB_HWCACHE_ALIGN);
1959 if (ldlm_lock_slab == NULL) {
1960 cfs_mem_cache_destroy(ldlm_resource_slab);
1967 void __exit ldlm_exit(void)
1971 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
1972 rc = cfs_mem_cache_destroy(ldlm_resource_slab);
1973 LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
1974 rc = cfs_mem_cache_destroy(ldlm_lock_slab);
1975 LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
1979 EXPORT_SYMBOL(ldlm_extent_shift_kms);
1982 EXPORT_SYMBOL(ldlm_get_processing_policy);
1983 EXPORT_SYMBOL(ldlm_lock2desc);
1984 EXPORT_SYMBOL(ldlm_register_intent);
1985 EXPORT_SYMBOL(ldlm_lockname);
1986 EXPORT_SYMBOL(ldlm_typename);
1987 EXPORT_SYMBOL(ldlm_lock2handle);
1988 EXPORT_SYMBOL(__ldlm_handle2lock);
1989 EXPORT_SYMBOL(ldlm_lock_get);
1990 EXPORT_SYMBOL(ldlm_lock_put);
1991 EXPORT_SYMBOL(ldlm_lock_match);
1992 EXPORT_SYMBOL(ldlm_lock_cancel);
1993 EXPORT_SYMBOL(ldlm_lock_addref);
1994 EXPORT_SYMBOL(ldlm_lock_decref);
1995 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
1996 EXPORT_SYMBOL(ldlm_lock_change_resource);
1997 EXPORT_SYMBOL(ldlm_lock_set_data);
1998 EXPORT_SYMBOL(ldlm_it2str);
1999 EXPORT_SYMBOL(ldlm_lock_dump);
2000 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2001 EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
2002 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
2003 EXPORT_SYMBOL(ldlm_lock_allow_match);
2005 /* ldlm_request.c */
2006 EXPORT_SYMBOL(ldlm_completion_ast);
2007 EXPORT_SYMBOL(ldlm_blocking_ast);
2008 EXPORT_SYMBOL(ldlm_glimpse_ast);
2009 EXPORT_SYMBOL(ldlm_expired_completion_wait);
2010 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
2011 EXPORT_SYMBOL(ldlm_cli_convert);
2012 EXPORT_SYMBOL(ldlm_cli_enqueue);
2013 EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
2014 EXPORT_SYMBOL(ldlm_cli_enqueue_local);
2015 EXPORT_SYMBOL(ldlm_cli_cancel);
2016 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
2017 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
2018 EXPORT_SYMBOL(ldlm_cli_cancel_req);
2019 EXPORT_SYMBOL(ldlm_cli_join_lru);
2020 EXPORT_SYMBOL(ldlm_replay_locks);
2021 EXPORT_SYMBOL(ldlm_resource_foreach);
2022 EXPORT_SYMBOL(ldlm_namespace_foreach);
2023 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
2024 EXPORT_SYMBOL(ldlm_resource_iterate);
2025 EXPORT_SYMBOL(ldlm_cancel_resource_local);
2026 EXPORT_SYMBOL(ldlm_cli_cancel_list);
2029 EXPORT_SYMBOL(ldlm_server_blocking_ast);
2030 EXPORT_SYMBOL(ldlm_server_completion_ast);
2031 EXPORT_SYMBOL(ldlm_server_glimpse_ast);
2032 EXPORT_SYMBOL(ldlm_handle_enqueue);
2033 EXPORT_SYMBOL(ldlm_handle_enqueue0);
2034 EXPORT_SYMBOL(ldlm_handle_cancel);
2035 EXPORT_SYMBOL(ldlm_request_cancel);
2036 EXPORT_SYMBOL(ldlm_handle_convert);
2037 EXPORT_SYMBOL(ldlm_handle_convert0);
2038 EXPORT_SYMBOL(ldlm_del_waiting_lock);
2039 EXPORT_SYMBOL(ldlm_get_ref);
2040 EXPORT_SYMBOL(ldlm_put_ref);
2041 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
2042 EXPORT_SYMBOL(ldlm_revoke_export_locks);
2044 /* ldlm_resource.c */
2045 EXPORT_SYMBOL(ldlm_namespace_new);
2046 EXPORT_SYMBOL(ldlm_namespace_cleanup);
2047 EXPORT_SYMBOL(ldlm_namespace_free);
2048 EXPORT_SYMBOL(ldlm_namespace_dump);
2049 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
2050 EXPORT_SYMBOL(ldlm_resource_get);
2051 EXPORT_SYMBOL(ldlm_resource_putref);
2052 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
2055 EXPORT_SYMBOL(client_import_add_conn);
2056 EXPORT_SYMBOL(client_import_del_conn);
2057 EXPORT_SYMBOL(client_obd_setup);
2058 EXPORT_SYMBOL(client_obd_cleanup);
2059 EXPORT_SYMBOL(client_connect_import);
2060 EXPORT_SYMBOL(client_disconnect_export);
2061 EXPORT_SYMBOL(target_start_recovery_thread);
2062 EXPORT_SYMBOL(target_stop_recovery_thread);
2063 EXPORT_SYMBOL(target_handle_connect);
2064 EXPORT_SYMBOL(target_cleanup_recovery);
2065 EXPORT_SYMBOL(target_destroy_export);
2066 EXPORT_SYMBOL(target_cancel_recovery_timer);
2067 EXPORT_SYMBOL(target_send_reply);
2068 EXPORT_SYMBOL(target_queue_recovery_request);
2069 EXPORT_SYMBOL(target_handle_ping);
2070 EXPORT_SYMBOL(target_handle_disconnect);
2073 EXPORT_SYMBOL(lock_res_and_lock);
2074 EXPORT_SYMBOL(unlock_res_and_lock);