/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifdef __KERNEL__
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include "ldlm_internal.h"

extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;
extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);

static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount = 0;

static struct ldlm_state *ldlm;

inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}

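/*
 * Worked example (illustrative, assuming HZ == 100): a timeout of 250
 * jiffies becomes (250 / 100 + 1) * 100 == 300 jiffies, i.e. the next whole
 * second.  Rounding every expiry to a one-second boundary lets nearby
 * expiries share a single firing of waiting_locks_timer below instead of
 * re-arming the timer once per lock.
 */
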
#ifdef __KERNEL__
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;

static struct expired_lock_thread {
        wait_queue_head_t       elt_waitq;
        int                     elt_state;
        struct list_head        elt_expired_locks;
        spinlock_t              elt_lock;
} expired_lock_thread;

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

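/*
 * elt_state lifecycle, as used below: ldlm_setup() initializes the state to
 * ELT_STOPPED before spawning the thread, expired_lock_main() advances it
 * to ELT_READY once it is running, ldlm_cleanup() requests ELT_TERMINATE,
 * and the thread sets ELT_STOPPED again on its way out.
 */
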
struct ldlm_bl_pool {
        spinlock_t              blp_lock;
        struct list_head        blp_list;
        wait_queue_head_t       blp_waitq;
        atomic_t                blp_num_threads;
        struct completion       blp_comp;
};

struct ldlm_bl_work_item {
        struct list_head        blwi_entry;
        struct ldlm_namespace   *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock        *blwi_lock;
};

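/*
 * A blocking AST received by ldlm_callback_handler() is packaged into one
 * of these work items and queued on blp_list; an ldlm_bl_NN thread dequeues
 * it via ldlm_bl_get_work() and runs ldlm_handle_bl_callback() on it.  The
 * producer side (see ldlm_bl_to_thread() below) is essentially:
 *
 *      OBD_ALLOC(blwi, sizeof(*blwi));
 *      blwi->blwi_ns = ns;
 *      blwi->blwi_ld = *ld;
 *      blwi->blwi_lock = lock;
 *      spin_lock(&blp->blp_lock);
 *      list_add_tail(&blwi->blwi_entry, &blp->blp_list);
 *      wake_up(&blp->blp_waitq);
 *      spin_unlock(&blp->blp_lock);
 */
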
static inline int have_expired_locks(void)
{
        int need_to_run;
        ENTRY;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);
        RETURN(need_to_run);
}

static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;
        ENTRY;

        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                struct list_head *tmp, *n, work_list;
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct ldlm_lock *lock;

                        list_add(&work_list, expired);
                        list_del_init(expired);

                        list_for_each_entry(lock, &work_list, l_pending_chain) {
                                LDLM_DEBUG(lock, "moving to work list");
                        }

                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        list_for_each_safe(tmp, n, &work_list) {
                                lock = list_entry(tmp, struct ldlm_lock,
                                                  l_pending_chain);
                                ptlrpc_fail_export(lock->l_export);
                        }

                        if (!list_empty(&work_list)) {
                                list_for_each_entry(lock, &work_list,
                                                    l_pending_chain) {
                                        LDLM_ERROR(lock, "still on work list!");
                                }
                        }
                        LASSERTF(list_empty(&work_list),
                                 "some exports not failed properly\n");

                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}

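/*
 * Note on the list handling above: "list_add(&work_list, expired)" followed
 * by "list_del_init(expired)" splices the whole expired list onto the
 * on-stack work_list head in O(1), so the ptlrpc_fail_export() calls can
 * then run without elt_lock held.
 */
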
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                if (lock->l_callback_timeout > jiffies)
                        break;

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid "LPX64" (%s) ",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           lock->l_export->exp_connection->c_peer.peer_nid,
                           portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
                                           lock->l_export->exp_connection->c_peer.peer_nid,
                                           str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left to wait on.
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        spin_unlock_bh(&waiting_locks_spinlock);
}

/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic.)
 */
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                RETURN(0);
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        RETURN(1);
}

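/*
 * Illustrative numbers: with the usual obd_timeout default of 100 seconds
 * (an assumption; it is a tunable), a client gets obd_timeout * HZ / 2,
 * i.e. 50 seconds, to answer a blocking AST before waiting_locks_callback()
 * hands its export to the expired-lock thread for eviction.
 */
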
/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                RETURN(0);
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                RETURN(0);
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        RETURN(1);
}

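/*
 * The list_del_init() above is done under elt_lock as well: by the time a
 * cancel arrives, waiting_locks_callback() may already have moved this lock
 * onto elt_expired_locks, a list that expired_lock_main() manipulates under
 * elt_lock only, and l_pending_chain is the same link in either list.
 */
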
#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */

static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, char *ast_type)
{
        const struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
               ", mode %s: evicting client %s@%s NID "LPX64" (%s)\n",
               ast_type, rc,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1],
               ldlm_lockname[lock->l_granted_mode],
               lock->l_export->exp_client_uuid.uuid,
               conn->c_remote_uuid.uuid, conn->c_peer.peer_nid,
               portals_nid2str(conn->c_peer.peer_ni->pni_number,
                               conn->c_peer.peer_nid, str));
        ptlrpc_fail_export(lock->l_export);
}

int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERT(lock);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_BL_CALLBACK, 1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2-second timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc == -ETIMEDOUT || rc == -EINTR) {
#ifdef __KERNEL__
                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, "blocking");
#else
                /* XXX
                 * Here we treat all clients as liblustre.  When a BLOCKING AST
                 * times out we don't evict the client; we only cancel the lock.
                 * Restore the original implementation later!
                 * XXX
                 */
                CERROR("BLOCKING AST to client (nid "LPU64") timed out, "
                       "simply cancelling lock 0x%p\n",
                       req->rq_peer.peer_nid, lock);
                ldlm_lock_cancel(lock);
                rc = -ERESTART;
#endif
        } else if (rc) {
                if (rc == -EINVAL)
                        CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p--normal race\n",
                               req->rq_peer.peer_nid,
                               req->rq_repmsg->status, lock);
                else if (rc == -ENOTCONN)
                        CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p--this client was "
                               "probably rebooted while it held a lock, nothing"
                               " serious\n", req->rq_peer.peer_nid,
                               req->rq_repmsg->status, lock);
                else
                        CDEBUG(D_ERROR, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p\n",
                               req->rq_peer.peer_nid,
                               (req->rq_repmsg != NULL) ?
                               req->rq_repmsg->status : 0,
                               lock);
                LDLM_DEBUG(lock, "client sent rc %d rq_status %d from blocking "
                           "AST", rc, req->rq_status);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        ptlrpc_req_finished(req);

        RETURN(rc);
}

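/*
 * To summarize the blocking AST above: it is a small RPC with a 2-second
 * initial reply timeout; on -ETIMEDOUT/-EINTR the client is evicted (on
 * kernel servers), and on any other error the lock is cancelled locally and
 * -ERESTART tells ldlm_reprocess_all() to start its scan over.
 */
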
/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}

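/*
 * Worked example: large = {5, 100} and small = {2, 900000} gives
 * (5 - 2) * 1000000 + (100 - 900000) == 2100100us, about 2.1 seconds;
 * a negative usec difference is absorbed by the signed arithmetic.
 */
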
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        LASSERT(lock != NULL);

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_CP_CALLBACK, 1, &size, NULL);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof(*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2-second timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc == -ETIMEDOUT || rc == -EINTR) {
                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, "completion");
        } else if (rc) {
                LDLM_ERROR(lock, "client sent rc %d rq_status %d from "
                           "completion AST", rc, req->rq_status);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }
        ptlrpc_req_finished(req);

        RETURN(rc);
}

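/*
 * LDLM_FL_AST_SENT above is how a blocking AST piggybacks on the completion
 * AST: if the lock was contended before it was even granted, the flag rides
 * along in the completion message and the lock-timeout clock starts, instead
 * of a separate LDLM_BL_CALLBACK being sent.
 */
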
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc, size = sizeof(*dlm_rep), cookielen = 0;
        int flags;
        ldlm_error_t err = ELDLM_OK;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                RETURN(-EFAULT);
        }

        flags = dlm_req->lock_flags;
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN &&
            (flags & LDLM_FL_HAS_INTENT)) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
                cookielen = sizeof(*req);
        } else {
                rc = lustre_pack_reply(req, 1, &size, NULL);
                if (rc) {
                        CERROR("out of memory\n");
                        RETURN(-ENOMEM);
                }
                if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
                        cookie = &dlm_req->lock_desc.l_policy_data;
                        cookielen = sizeof(ldlm_policy_data_t);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace,
                                &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback, NULL);
        if (!lock)
                GOTO(out, err = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        lock->l_export = class_export_get(req->rq_export);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, cookielen,
                                &flags);
        if (err != ELDLM_OK)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2handle(lock, &dlm_rep->lock_handle);
        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
                memcpy(&dlm_rep->lock_policy_data, &lock->l_policy_data,
                       cookielen);
        }
        if (dlm_rep->lock_flags & LDLM_FL_LOCK_CHANGED) {
                memcpy(&dlm_rep->lock_resource_name, &lock->l_resource->lr_name,
                       sizeof(dlm_rep->lock_resource_name));
                dlm_rep->lock_mode = lock->l_req_mode;
        }

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
                           " (err=%d)", err);
                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }
        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p)", lock);

        RETURN(0);
}

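/*
 * Two reply-buffer disciplines coexist above: for a plain lock with
 * LDLM_FL_HAS_INTENT the reply is packed deep inside the intent policy
 * function (the request itself is passed down as the cookie), while for
 * every other lock type this handler packs a single ldlm_reply itself.
 */
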
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("Can't unpack dlm_req\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                LDLM_DEBUG(lock, "server-side convert handler START");
                ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                  &dlm_rep->lock_flags);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}

int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from nid "LPX64" (%s)\n", dlm_req->lock_handle1.cookie,
                       req->rq_peer.peer_nid,
                       portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                       req->rq_peer.peer_nid, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPU64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                req->rq_status = rc;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}

static void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                                    struct ldlm_lock_desc *ld,
                                    struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be "
                           "cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }

        if (lock->l_resource->lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                          dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);

        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}

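/*
 * lr_tmp above collects the ASTs that ldlm_grant_lock() would otherwise
 * fire immediately; they are batched on ast_list and only run by
 * ldlm_run_ast_work() after ns_lock has been dropped.
 */
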
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc)
                return rc;
        return ptlrpc_reply(req);
}

#ifdef __KERNEL__
static int ldlm_bl_to_thread(struct ldlm_state *ldlm, struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
}
#endif

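/*
 * Punting the blocking AST to a dedicated ldlm_bl thread keeps the callback
 * service thread available to answer further RPCs while the (possibly slow)
 * blocking callback, which may flush cached writes, runs: the handler just
 * queues the work item, replies, and moves on.
 */
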
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from nid "LPX64" (%s) with bad "
                       "export cookie "LPX64" (ptl req %d/rep %d); this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc, req->rq_peer.peer_nid,
                       portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                       req->rq_peer.peer_nid, str),
                       req->rq_reqmsg->handle.cookie,
                       req->rq_request_portal, req->rq_reply_portal);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        if (req->rq_reqmsg->opc == LDLM_BL_CALLBACK) {
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
        } else if (req->rq_reqmsg->opc == LDLM_CP_CALLBACK) {
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
        } else if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else {
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        /* FIXME - how to send reply */
        if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
                int rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
                int rc = llog_origin_handle_create(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
                int rc = llog_origin_handle_next_block(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
                int rc = llog_origin_handle_read_header(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
                int rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                     lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("can't unpack dlm_req\n");
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
                       dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */
        if (req->rq_reqmsg->opc != LDLM_BL_CALLBACK)
                ldlm_callback_reply(req, 0);

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
#ifdef __KERNEL__
                rc = ldlm_bl_to_thread(ldlm, ns, &dlm_req->lock_desc, lock);
                ldlm_callback_reply(req, rc);
#else
                rc = 0;
                ldlm_callback_reply(req, rc);
                ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
#endif
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG(); /* checked above */
        }

        RETURN(0);
}

static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
                       req->rq_reqmsg->opc, req->rq_request_portal,
                       req->rq_reply_portal);
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof(*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                RETURN(-ENOTCONN);
        }

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);

        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                RETURN(-EINVAL);
        }

        RETURN(0);
}

#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}

struct ldlm_bl_thread_data {
        int                     bltd_num;
        struct ldlm_bl_pool     *bltd_blp;
};

static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}
#endif

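/*
 * The NULL-namespace work item is the shutdown poison pill: ldlm_cleanup()
 * queues one per remaining thread (stack-allocated, which is why it is not
 * freed here), and each l_wait_event_exclusive() wakeup delivers it to
 * exactly one thread, which then breaks out of the loop above.
 */
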
static int ldlm_setup(void);
static int ldlm_cleanup(int force);

int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        EXIT;
}

static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
        int i;
        ENTRY;

        if (ldlm != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm, sizeof(*ldlm));
        if (ldlm == NULL)
                RETURN(-ENOMEM);

        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);

        ldlm->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
                                LDLM_MAXREQSIZE, LDLM_CB_REQUEST_PORTAL,
                                LDLM_CB_REPLY_PORTAL,
                                ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);
        if (!ldlm->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
                                LDLM_MAXREQSIZE, LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);
        if (!ldlm->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        LBUG();
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc) {
                LBUG();
                GOTO(out_thread, rc);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc) {
                LBUG();
                GOTO(out_thread, rc);
        }

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm->ldlm_cb_service);
#endif

 out_proc:
        ldlm_proc_cleanup();
 out_free:
        OBD_FREE(ldlm, sizeof(*ldlm));
        ldlm = NULL;
        RETURN(rc);
}

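/*
 * Note that bltd above lives on ldlm_setup()'s stack: the per-iteration
 * init_completion()/wait_for_completion() handshake ensures the new thread
 * has finished reading its arguments (and bumped blp_num_threads) before
 * the next iteration reuses that stack slot.
 */
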
static int ldlm_cleanup(int force)
{
        struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces();
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);
#endif

        OBD_FREE(ldlm, sizeof(*ldlm));
        ldlm = NULL;
        RETURN(0);
}

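/*
 * Shutdown ordering above: drain the ldlm_bl threads first (one poison work
 * item per thread; see ldlm_bl_thread_main()), then stop the ptlrpc
 * services, and only then terminate the expired-lock thread, so that no new
 * work can be queued while the machinery is being torn down.
 */
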
int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}

void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        if (kmem_cache_destroy(ldlm_resource_slab) != 0)
                CERROR("couldn't free ldlm resource slab\n");
        if (kmem_cache_destroy(ldlm_lock_slab) != 0)
                CERROR("couldn't free ldlm lock slab\n");
}

/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_unregister_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_abort_recovery);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);
EXPORT_SYMBOL(ldlm_put_lock_into_req);