1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 # define EXPORT_SYMTAB
27 #define DEBUG_SUBSYSTEM S_LDLM
30 # include <linux/module.h>
31 # include <linux/slab.h>
32 # include <linux/init.h>
33 # include <linux/wait.h>
35 # include <liblustre.h>
38 #include <linux/lustre_dlm.h>
39 #include <linux/obd_class.h>
40 #include "ldlm_internal.h"
42 extern kmem_cache_t *ldlm_resource_slab;
43 extern kmem_cache_t *ldlm_lock_slab;
44 extern struct lustre_lock ldlm_handle_lock;
45 extern struct list_head ldlm_namespace_list;
46 extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
47 extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);
49 static DECLARE_MUTEX(ldlm_ref_sem);
50 static int ldlm_refcount = 0;
54 static struct ldlm_state *ldlm;
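/* Round a jiffies value up to the next whole-second boundary (an exact
 * multiple of HZ is still bumped to the following second).  With HZ=100,
 * for example, 250 jiffies rounds to 300.  Batching expiries on second
 * boundaries keeps the waiting-locks timer from firing once per lock
 * under heavy contention. */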
56 inline unsigned long round_timeout(unsigned long timeout)
58 return ((timeout / HZ) + 1) * HZ;
62 /* XXX should this be per-ldlm? */
63 static struct list_head waiting_locks_list;
64 static spinlock_t waiting_locks_spinlock;
65 static struct timer_list waiting_locks_timer;
67 static struct expired_lock_thread {
68 wait_queue_head_t elt_waitq;
70 struct list_head elt_expired_locks;
72 } expired_lock_thread;
77 #define ELT_TERMINATE 2
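/* Lifecycle of the expired-lock thread: ldlm_setup() starts it in
 * ELT_STOPPED, expired_lock_main() advertises ELT_READY once it is
 * running, and ldlm_cleanup() sets ELT_TERMINATE to make it exit (back
 * to ELT_STOPPED).  waiting_locks_callback() runs in timer (softirq)
 * context, so it only moves timed-out locks onto elt_expired_locks and
 * wakes this thread, which then evicts the offending clients with
 * ptlrpc_fail_export() from process context. */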
81 struct list_head blp_list;
82 wait_queue_head_t blp_waitq;
83 atomic_t blp_num_threads;
84 struct completion blp_comp;
87 struct ldlm_bl_work_item {
88 struct list_head blwi_entry;
89 struct ldlm_namespace *blwi_ns;
90 struct ldlm_lock_desc blwi_ld;
91 struct ldlm_lock *blwi_lock;
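/* A blocking AST received by the callback service is not run in the
 * service thread itself: ldlm_bl_to_thread() wraps it in one of these
 * work items and queues it on the ldlm_bl_pool, where an "ldlm_bl_NN"
 * thread (ldlm_bl_thread_main) picks it up and calls
 * ldlm_handle_bl_callback().  A work item with blwi_ns == NULL is the
 * shutdown signal used by ldlm_cleanup(). */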
96 static inline int have_expired_locks(void)
100 spin_lock_bh(&expired_lock_thread.elt_lock);
101 need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
102 spin_unlock_bh(&expired_lock_thread.elt_lock);
107 static int expired_lock_main(void *arg)
109 struct list_head *expired = &expired_lock_thread.elt_expired_locks;
110 struct l_wait_info lwi = { 0 };
115 kportal_daemonize("ldlm_elt");
117 SIGNAL_MASK_LOCK(current, flags);
118 sigfillset(&current->blocked);
120 SIGNAL_MASK_UNLOCK(current, flags);
124 expired_lock_thread.elt_state = ELT_READY;
125 wake_up(&expired_lock_thread.elt_waitq);
128 l_wait_event(expired_lock_thread.elt_waitq,
129 have_expired_locks() ||
130 expired_lock_thread.elt_state == ELT_TERMINATE,
133 spin_lock_bh(&expired_lock_thread.elt_lock);
134 while (!list_empty(expired)) {
135 struct ldlm_lock *lock = list_entry(expired->next,
138 spin_unlock_bh(&expired_lock_thread.elt_lock);
140 ptlrpc_fail_export(lock->l_export);
142 spin_lock_bh(&expired_lock_thread.elt_lock);
144 spin_unlock_bh(&expired_lock_thread.elt_lock);
146 if (expired_lock_thread.elt_state == ELT_TERMINATE)
150 expired_lock_thread.elt_state = ELT_STOPPED;
151 wake_up(&expired_lock_thread.elt_waitq);
155 static void waiting_locks_callback(unsigned long unused)
157 struct ldlm_lock *lock;
158 char str[PTL_NALFMT_SIZE];
160 spin_lock_bh(&waiting_locks_spinlock);
161 while (!list_empty(&waiting_locks_list)) {
162 lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
165 if (lock->l_callback_timeout > jiffies)
168 LDLM_ERROR(lock, "lock callback timer expired: evicting client "
169 "%s@%s nid "LPX64" (%s) ",
170 lock->l_export->exp_client_uuid.uuid,
171 lock->l_export->exp_connection->c_remote_uuid.uuid,
172 lock->l_export->exp_connection->c_peer.peer_nid,
173 portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
174 lock->l_export->exp_connection->c_peer.peer_nid,
177 spin_lock_bh(&expired_lock_thread.elt_lock);
178 list_del(&lock->l_pending_chain);
179 list_add(&lock->l_pending_chain,
180 &expired_lock_thread.elt_expired_locks);
181 spin_unlock_bh(&expired_lock_thread.elt_lock);
182 wake_up(&expired_lock_thread.elt_waitq);
185 spin_unlock_bh(&waiting_locks_spinlock);
189 * Indicate that we're waiting for a client to call us back to cancel a given
190 * lock. We add it to the pending-callback chain, and schedule the lock-timeout
191 * timer to fire appropriately. (We round up to the next second, to avoid
192 * floods of timer firings during periods of high lock contention and traffic).
194 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
196 unsigned long timeout_rounded;
198 spin_lock_bh(&waiting_locks_spinlock);
199 if (!list_empty(&lock->l_pending_chain)) {
200 LDLM_DEBUG(lock, "not re-adding to wait list");
201 spin_unlock_bh(&waiting_locks_spinlock);
204 LDLM_DEBUG(lock, "adding to wait list");
206 lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);
208 timeout_rounded = round_timeout(lock->l_callback_timeout);
210 if (timeout_rounded < waiting_locks_timer.expires ||
211 !timer_pending(&waiting_locks_timer)) {
212 mod_timer(&waiting_locks_timer, timeout_rounded);
214 list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
215 spin_unlock_bh(&waiting_locks_spinlock);
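/* Note that ldlm_add_waiting_lock() arms a deadline of obd_timeout / 2
 * seconds: the client has half of the overall obd timeout to answer a
 * blocking AST before the expired-lock thread evicts it. */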
220 * Remove a lock from the pending list, likely because it had its cancellation
221 * callback arrive without incident. This adjusts the lock-timeout timer if
222 * needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
224 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
226 struct list_head *list_next;
228 if (lock->l_export == NULL) {
229 /* We don't have a "waiting locks list" on clients. */
230 LDLM_DEBUG(lock, "client lock: no-op");
234 spin_lock_bh(&waiting_locks_spinlock);
236 if (list_empty(&lock->l_pending_chain)) {
237 spin_unlock_bh(&waiting_locks_spinlock);
238 LDLM_DEBUG(lock, "wasn't waiting");
242 list_next = lock->l_pending_chain.next;
243 if (lock->l_pending_chain.prev == &waiting_locks_list) {
244 /* Removing the head of the list, adjust timer. */
245 if (list_next == &waiting_locks_list) {
246 /* No more, just cancel. */
247 del_timer(&waiting_locks_timer);
249 struct ldlm_lock *next;
250 next = list_entry(list_next, struct ldlm_lock,
252 mod_timer(&waiting_locks_timer,
253 round_timeout(next->l_callback_timeout));
256 list_del_init(&lock->l_pending_chain);
257 spin_unlock_bh(&waiting_locks_spinlock);
258 LDLM_DEBUG(lock, "removed");
262 #else /* !__KERNEL__ */
264 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
269 int ldlm_del_waiting_lock(struct ldlm_lock *lock)
274 #endif /* __KERNEL__ */
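/* In the !__KERNEL__ (liblustre) build the two functions above are reduced
 * to trivial stubs: a userspace client keeps no waiting-locks list and
 * never evicts peers, so there is nothing to add or remove. */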
276 static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, char *ast_type)
278 const struct ptlrpc_connection *conn = lock->l_export->exp_connection;
279 char str[PTL_NALFMT_SIZE];
281 CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
282 ", mode %s: evicting client %s@%s NID "LPX64" (%s)\n",
284 lock->l_resource->lr_name.name[0],
285 lock->l_resource->lr_name.name[1],
286 ldlm_lockname[lock->l_granted_mode],
287 lock->l_export->exp_client_uuid.uuid,
288 conn->c_remote_uuid.uuid, conn->c_peer.peer_nid,
289 portals_nid2str(conn->c_peer.peer_ni->pni_number,
290 conn->c_peer.peer_nid, str));
291 ptlrpc_fail_export(lock->l_export);
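/* Server-side blocking AST: ask the client holding this lock to cancel it.
 * If the lock has not been granted yet, the blocking flag is piggybacked on
 * the completion AST instead.  If the export has been silent for more than
 * 30 seconds we fail the AST up front rather than queue an RPC to a client
 * that has probably gone away. */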
294 int ldlm_server_blocking_ast(struct ldlm_lock *lock,
295 struct ldlm_lock_desc *desc,
296 void *data, int flag)
298 struct ldlm_request *body;
299 struct ptlrpc_request *req;
300 int rc = 0, size = sizeof(*body);
303 if (flag == LDLM_CB_CANCELING) {
304 /* Don't need to do anything here. */
310 l_lock(&lock->l_resource->lr_namespace->ns_lock);
311 if (lock->l_granted_mode != lock->l_req_mode) {
312 /* this blocking AST will be communicated as part of the
313 * completion AST instead */
314 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
315 LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
316 RETURN(0);
318 if (lock->l_destroyed) {
319 /* What's the point? */
320 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
325 if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
326 ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
331 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
332 LDLM_BL_CALLBACK, 1, &size, NULL);
336 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
337 memcpy(&body->lock_handle1, &lock->l_remote_handle,
338 sizeof(body->lock_handle1));
339 memcpy(&body->lock_desc, desc, sizeof(*desc));
340 body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
342 LDLM_DEBUG(lock, "server preparing blocking AST");
343 req->rq_replen = lustre_msg_size(0, NULL);
345 if (lock->l_granted_mode == lock->l_req_mode)
346 ldlm_add_waiting_lock(lock);
347 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
349 req->rq_send_state = LUSTRE_IMP_FULL;
350 req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
351 rc = ptlrpc_queue_wait(req);
352 if (rc == -ETIMEDOUT || rc == -EINTR) {
354 ldlm_del_waiting_lock(lock);
355 ldlm_failed_ast(lock, rc, "blocking");
358 * Here we treat all clients as liblustre. When a BLOCKING AST
359 * times out we don't evict the client and only cancel
361 * restore the original implementation later!!!
364 CERROR("BLOCKING AST to client (nid "LPU64") timeout, "
365 "simply cancel lock 0x%p\n",
366 req->rq_connection->c_peer.peer_nid, lock);
367 ldlm_lock_cancel(lock);
372 CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
373 "from blocking AST for lock %p--normal race\n",
374 req->rq_connection->c_peer.peer_nid,
375 req->rq_repmsg->status, lock);
376 else if (rc == -ENOTCONN)
377 CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
378 "from blocking AST for lock %p--this client was "
379 "probably rebooted while it held a lock, nothing"
380 " serious\n",req->rq_connection->c_peer.peer_nid,
381 req->rq_repmsg->status, lock);
383 CDEBUG(D_ERROR, "client (nid "LPU64") returned %d "
384 "from blocking AST for lock %p\n",
385 req->rq_connection->c_peer.peer_nid,
386 (req->rq_repmsg != NULL)?
387 req->rq_repmsg->status : 0,
389 LDLM_DEBUG(lock, "client sent rc %d rq_status %d from blocking "
390 "AST", rc, req->rq_status);
391 ldlm_lock_cancel(lock);
392 /* Server-side AST functions are called from ldlm_reprocess_all,
393 * which needs to be told to please restart its reprocessing. */
397 ptlrpc_req_finished(req);
402 /* XXX copied from ptlrpc/service.c */
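/* Returns (large - small) in microseconds; used below to report how long a
 * lock waited between enqueue and grant. */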
403 static long timeval_sub(struct timeval *large, struct timeval *small)
405 return (large->tv_sec - small->tv_sec) * 1000000 +
406 (large->tv_usec - small->tv_usec);
409 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
411 struct ldlm_request *body;
412 struct ptlrpc_request *req;
413 struct timeval granted_time;
414 long total_enqueue_wait;
415 int rc = 0, size = sizeof(*body);
423 do_gettimeofday(&granted_time);
424 total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);
426 if (total_enqueue_wait / 1000000 > obd_timeout)
427 LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);
429 req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
430 LDLM_CP_CALLBACK, 1, &size, NULL);
434 body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
435 memcpy(&body->lock_handle1, &lock->l_remote_handle,
436 sizeof(body->lock_handle1));
437 body->lock_flags = flags;
438 ldlm_lock2desc(lock, &body->lock_desc);
440 LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
442 req->rq_replen = lustre_msg_size(0, NULL);
444 req->rq_send_state = LUSTRE_IMP_FULL;
445 req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
447 /* We only send real blocking ASTs after the lock is granted */
448 l_lock(&lock->l_resource->lr_namespace->ns_lock);
449 if (lock->l_flags & LDLM_FL_AST_SENT) {
450 body->lock_flags |= LDLM_FL_AST_SENT;
451 ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
453 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
455 rc = ptlrpc_queue_wait(req);
456 if (rc == -ETIMEDOUT || rc == -EINTR) {
457 ldlm_del_waiting_lock(lock);
458 ldlm_failed_ast(lock, rc, "completion");
460 LDLM_ERROR(lock, "client sent rc %d rq_status %d from "
461 "completion AST\n", rc, req->rq_status);
462 ldlm_lock_cancel(lock);
463 /* Server-side AST functions are called from ldlm_reprocess_all,
464 * which needs to be told to please restart its reprocessing. */
467 ptlrpc_req_finished(req);
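/* Server-side enqueue: unpack the client's ldlm_request, create the lock,
 * attach it to the requesting export and hand it to ldlm_lock_enqueue().
 * The reply carries the (possibly changed) resource name, mode and policy
 * data, plus LDLM_FL_AST_SENT if a blocking AST is already pending on the
 * new lock.  For PLAIN locks with an intent, the reply buffer is allocated
 * by the intent policy function instead of here. */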
472 int ldlm_handle_enqueue(struct ptlrpc_request *req,
473 ldlm_completion_callback completion_callback,
474 ldlm_blocking_callback blocking_callback)
476 struct obd_device *obddev = req->rq_export->exp_obd;
477 struct ldlm_reply *dlm_rep;
478 struct ldlm_request *dlm_req;
479 int rc, size = sizeof(*dlm_rep), cookielen = 0;
482 struct ldlm_lock *lock = NULL;
486 LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
488 dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
489 lustre_swab_ldlm_request);
490 if (dlm_req == NULL) {
491 CERROR ("Can't unpack dlm_req\n");
495 flags = dlm_req->lock_flags;
496 if (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN &&
497 (flags & LDLM_FL_HAS_INTENT)) {
498 /* In this case, the reply buffer is allocated deep in
499 * local_lock_enqueue by the policy function. */
501 cookielen = sizeof(*req);
503 rc = lustre_pack_reply(req, 1, &size, NULL);
505 CERROR("out of memory\n");
508 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
509 cookie = &dlm_req->lock_desc.l_policy_data;
510 cookielen = sizeof(ldlm_policy_data_t);
514 /* The lock's callback data might be set in the policy function */
515 lock = ldlm_lock_create(obddev->obd_namespace,
516 &dlm_req->lock_handle2,
517 dlm_req->lock_desc.l_resource.lr_name,
518 dlm_req->lock_desc.l_resource.lr_type,
519 dlm_req->lock_desc.l_req_mode,
520 blocking_callback, completion_callback, NULL);
522 GOTO(out, err = -ENOMEM);
524 do_gettimeofday(&lock->l_enqueued_time);
525 memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
526 sizeof(lock->l_remote_handle));
527 LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
529 LASSERT(req->rq_export);
530 lock->l_export = class_export_get(req->rq_export);
531 l_lock(&lock->l_resource->lr_namespace->ns_lock);
532 list_add(&lock->l_export_chain,
533 &lock->l_export->exp_ldlm_data.led_held_locks);
534 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
536 err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, cookielen,
541 dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
542 dlm_rep->lock_flags = flags;
544 ldlm_lock2handle(lock, &dlm_rep->lock_handle);
545 if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
546 memcpy(&dlm_rep->lock_policy_data, &lock->l_policy_data,
549 if (dlm_rep->lock_flags & LDLM_FL_LOCK_CHANGED) {
550 memcpy(&dlm_rep->lock_resource_name, &lock->l_resource->lr_name,
551 sizeof(dlm_rep->lock_resource_name));
552 dlm_rep->lock_mode = lock->l_req_mode;
555 /* We never send a blocking AST until the lock is granted, but
556 * we can tell the client about it right now */
557 l_lock(&lock->l_resource->lr_namespace->ns_lock);
558 if (lock->l_flags & LDLM_FL_AST_SENT) {
559 dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
560 if (lock->l_granted_mode == lock->l_req_mode)
561 ldlm_add_waiting_lock(lock);
563 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
567 req->rq_status = err;
569 /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
570 * ldlm_reprocess_all. If this moves, revisit that code. -phil */
572 LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
574 if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
575 ldlm_reprocess_all(lock->l_resource);
578 LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p)", lock);
583 int ldlm_handle_convert(struct ptlrpc_request *req)
585 struct ldlm_request *dlm_req;
586 struct ldlm_reply *dlm_rep;
587 struct ldlm_lock *lock;
588 int rc, size = sizeof(*dlm_rep);
591 dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
592 lustre_swab_ldlm_request);
593 if (dlm_req == NULL) {
594 CERROR ("Can't unpack dlm_req\n");
598 rc = lustre_pack_reply(req, 1, &size, NULL);
600 CERROR("out of memory\n");
603 dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
604 dlm_rep->lock_flags = dlm_req->lock_flags;
606 lock = ldlm_handle2lock(&dlm_req->lock_handle1);
608 req->rq_status = EINVAL;
610 LDLM_DEBUG(lock, "server-side convert handler START");
611 ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
612 &dlm_rep->lock_flags);
613 if (ldlm_del_waiting_lock(lock))
614 CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
619 ldlm_reprocess_all(lock->l_resource);
620 LDLM_DEBUG(lock, "server-side convert handler END");
623 LDLM_DEBUG_NOLOCK("server-side convert handler END");
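/* Server-side cancel: look the lock up by the cookie the client sent.  An
 * unknown cookie is not fatal (the lock may already be gone), so we just
 * report ESTALE; otherwise cancel the lock, drop it from the waiting list
 * and reprocess the resource. */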
628 int ldlm_handle_cancel(struct ptlrpc_request *req)
630 struct ldlm_request *dlm_req;
631 struct ldlm_lock *lock;
632 char str[PTL_NALFMT_SIZE];
636 dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
637 lustre_swab_ldlm_request);
638 if (dlm_req == NULL) {
639 CERROR("bad request buffer for cancel\n");
643 rc = lustre_pack_reply(req, 0, NULL, NULL);
645 CERROR("out of memory\n");
649 lock = ldlm_handle2lock(&dlm_req->lock_handle1);
651 CERROR("received cancel for unknown lock cookie "LPX64
652 " from nid "LPX64" (%s)\n", dlm_req->lock_handle1.cookie,
653 req->rq_connection->c_peer.peer_nid,
654 portals_nid2str(req->rq_connection->c_peer.peer_ni->pni_number,
655 req->rq_connection->c_peer.peer_nid, str));
656 LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
658 dlm_req->lock_handle1.cookie);
659 req->rq_status = ESTALE;
661 LDLM_DEBUG(lock, "server-side cancel handler START");
662 ldlm_lock_cancel(lock);
663 if (ldlm_del_waiting_lock(lock))
664 CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
668 if (ptlrpc_reply(req) != 0)
672 ldlm_reprocess_all(lock->l_resource);
673 LDLM_DEBUG(lock, "server-side cancel handler END");
680 static void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
681 struct ldlm_lock_desc *ld,
682 struct ldlm_lock *lock)
687 l_lock(&ns->ns_lock);
688 LDLM_DEBUG(lock, "client blocking AST callback handler START");
690 lock->l_flags |= LDLM_FL_CBPENDING;
691 do_ast = (!lock->l_readers && !lock->l_writers);
694 LDLM_DEBUG(lock, "already unused, calling "
695 "callback (%p)", lock->l_blocking_ast);
696 if (lock->l_blocking_ast != NULL) {
697 l_unlock(&ns->ns_lock);
698 l_check_no_ns_lock(ns);
699 lock->l_blocking_ast(lock, ld, lock->l_ast_data,
701 l_lock(&ns->ns_lock);
704 LDLM_DEBUG(lock, "Lock still has references, will be"
708 LDLM_DEBUG(lock, "client blocking callback handler END");
709 l_unlock(&ns->ns_lock);
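/* Client-side completion AST: the server says the lock is now granted,
 * possibly with a different mode, resource or policy data than we asked
 * for, so the local lock is updated to match before it is granted here.
 * If the server set LDLM_FL_AST_SENT, a blocking AST is implied as well
 * and the lock is marked CBPENDING right away. */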
714 static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
715 struct ldlm_namespace *ns,
716 struct ldlm_request *dlm_req,
717 struct ldlm_lock *lock)
722 l_lock(&ns->ns_lock);
723 LDLM_DEBUG(lock, "client completion callback handler START");
725 /* If we receive the completion AST before the actual enqueue returned,
726 * then we might need to switch lock modes, resources, or extents. */
727 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
728 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
729 LDLM_DEBUG(lock, "completion AST, new lock mode");
731 if (lock->l_resource->lr_type != LDLM_PLAIN)
732 memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
733 sizeof(lock->l_policy_data));
735 ldlm_resource_unlink_lock(lock);
736 if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
737 &lock->l_resource->lr_name,
738 sizeof(lock->l_resource->lr_name)) != 0) {
739 ldlm_lock_change_resource(ns, lock,
740 dlm_req->lock_desc.l_resource.lr_name);
741 LDLM_DEBUG(lock, "completion AST, new resource");
744 if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
745 lock->l_flags |= LDLM_FL_CBPENDING;
746 LDLM_DEBUG(lock, "completion AST includes blocking AST");
749 lock->l_resource->lr_tmp = &ast_list;
750 ldlm_grant_lock(lock, req, sizeof(*req), 1);
751 lock->l_resource->lr_tmp = NULL;
752 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
753 l_unlock(&ns->ns_lock);
756 ldlm_run_ast_work(ns, &ast_list);
758 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
763 static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
766 rc = lustre_pack_reply(req, 0, NULL, NULL);
769 return ptlrpc_reply(req);
773 static int ldlm_bl_to_thread(struct ldlm_state *ldlm, struct ldlm_namespace *ns,
774 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
776 struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
777 struct ldlm_bl_work_item *blwi;
780 OBD_ALLOC(blwi, sizeof(*blwi));
786 blwi->blwi_lock = lock;
788 spin_lock(&blp->blp_lock);
789 list_add_tail(&blwi->blwi_entry, &blp->blp_list);
790 wake_up(&blp->blp_waitq);
791 spin_unlock(&blp->blp_lock);
797 static int ldlm_callback_handler(struct ptlrpc_request *req)
799 struct ldlm_namespace *ns;
800 struct ldlm_request *dlm_req;
801 struct ldlm_lock *lock;
802 char str[PTL_NALFMT_SIZE];
806 /* Requests arrive in sender's byte order. The ptlrpc service
807 * handler has already checked and, if necessary, byte-swapped the
808 * incoming request message body, but I am responsible for the
809 * message buffers. */
811 if (req->rq_export == NULL) {
812 struct ldlm_request *dlm_req;
814 CDEBUG(D_RPCTRACE, "operation %d from nid "LPX64" (%s) with bad "
815 "export cookie "LPX64" (ptl req %d/rep %d); this is "
816 "normal if this node rebooted with a lock held\n",
817 req->rq_reqmsg->opc, req->rq_connection->c_peer.peer_nid,
818 portals_nid2str(req->rq_connection->c_peer.peer_ni->pni_number,
819 req->rq_connection->c_peer.peer_nid, str),
820 req->rq_reqmsg->handle.cookie,
821 req->rq_request_portal, req->rq_reply_portal);
823 dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
824 lustre_swab_ldlm_request);
826 CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
827 dlm_req->lock_handle1.cookie);
829 ldlm_callback_reply(req, -ENOTCONN);
833 if (req->rq_reqmsg->opc == LDLM_BL_CALLBACK) {
834 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
835 } else if (req->rq_reqmsg->opc == LDLM_CP_CALLBACK) {
836 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
837 } else if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
838 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
839 } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
840 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
841 } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
842 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
843 } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
844 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
845 } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
846 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
848 ldlm_callback_reply(req, -EPROTO);
852 LASSERT(req->rq_export != NULL);
853 LASSERT(req->rq_export->exp_obd != NULL);
855 #ifdef ENABLE_ORPHANS
856 /* FIXME - how to send reply */
857 if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
858 int rc = llog_origin_handle_cancel(req);
859 ldlm_callback_reply(req, rc);
863 if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
864 int rc = llog_origin_handle_create(req);
870 if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
871 int rc = llog_origin_handle_next_block(req);
877 if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
878 int rc = llog_origin_handle_read_header(req);
884 if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
885 int rc = llog_origin_handle_close(req);
886 ldlm_callback_reply(req, rc);
890 ns = req->rq_export->exp_obd->obd_namespace;
893 dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
894 lustre_swab_ldlm_request);
895 if (dlm_req == NULL) {
896 CERROR ("can't unpack dlm_req\n");
897 ldlm_callback_reply (req, -EPROTO);
901 lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
903 CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
904 dlm_req->lock_handle1.cookie);
905 ldlm_callback_reply(req, -EINVAL);
909 /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
910 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
912 /* We want the ost thread to get this reply so that it can respond
913 * to ost requests (write cache writeback) that might be triggered
916 * But we'd also like to be able to indicate in the reply that we're
917 * cancelling right now, because it's unused, or have an intent result
918 * in the reply, so we might have to push the responsibility for sending
919 * the reply down into the AST handlers, alas. */
920 if (req->rq_reqmsg->opc != LDLM_BL_CALLBACK)
921 ldlm_callback_reply(req, 0);
923 switch (req->rq_reqmsg->opc) {
924 case LDLM_BL_CALLBACK:
925 CDEBUG(D_INODE, "blocking ast\n");
927 rc = ldlm_bl_to_thread(ldlm, ns, &dlm_req->lock_desc, lock);
928 ldlm_callback_reply(req, rc);
931 ldlm_callback_reply(req, rc);
932 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
935 case LDLM_CP_CALLBACK:
936 CDEBUG(D_INODE, "completion ast\n");
937 ldlm_handle_cp_callback(req, ns, dlm_req, lock);
940 LBUG(); /* checked above */
946 static int ldlm_cancel_handler(struct ptlrpc_request *req)
951 /* Requests arrive in sender's byte order. The ptlrpc service
952 * handler has already checked and, if necessary, byte-swapped the
953 * incoming request message body, but I am responsible for the
954 * message buffers. */
956 if (req->rq_export == NULL) {
957 struct ldlm_request *dlm_req;
958 CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
959 req->rq_reqmsg->opc, req->rq_request_portal,
960 req->rq_reply_portal);
961 CERROR("--> export cookie: "LPX64"\n",
962 req->rq_reqmsg->handle.cookie);
963 dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
964 lustre_swab_ldlm_request);
966 ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
970 switch (req->rq_reqmsg->opc) {
972 /* XXX FIXME move this back to mds/handler.c, bug 249 */
974 CDEBUG(D_INODE, "cancel\n");
975 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
976 rc = ldlm_handle_cancel(req);
982 CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
990 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
992 struct ldlm_bl_work_item *blwi = NULL;
994 spin_lock(&blp->blp_lock);
995 if (!list_empty(&blp->blp_list)) {
996 blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
998 list_del(&blwi->blwi_entry);
1000 spin_unlock(&blp->blp_lock);
1005 struct ldlm_bl_thread_data {
1007 struct ldlm_bl_pool *bltd_blp;
1010 static int ldlm_bl_thread_main(void *arg)
1012 struct ldlm_bl_thread_data *bltd = arg;
1013 struct ldlm_bl_pool *blp = bltd->bltd_blp;
1014 unsigned long flags;
1017 /* XXX boiler-plate */
1019 char name[sizeof(current->comm)];
1020 snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
1022 kportal_daemonize(name);
1024 SIGNAL_MASK_LOCK(current, flags);
1025 sigfillset(&current->blocked);
1027 SIGNAL_MASK_UNLOCK(current, flags);
1029 atomic_inc(&blp->blp_num_threads);
1030 complete(&blp->blp_comp);
1033 struct l_wait_info lwi = { 0 };
1034 struct ldlm_bl_work_item *blwi = NULL;
1036 l_wait_event_exclusive(blp->blp_waitq,
1037 (blwi = ldlm_bl_get_work(blp)) != NULL,
1040 if (blwi->blwi_ns == NULL)
1043 ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
1045 OBD_FREE(blwi, sizeof(*blwi));
1048 atomic_dec(&blp->blp_num_threads);
1049 complete(&blp->blp_comp);
1055 static int ldlm_setup(void);
1056 static int ldlm_cleanup(int force);
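/* The node-wide LDLM state (callback and cancel services, blocking-AST
 * thread pool, waiting-locks timer) is reference counted: the first
 * ldlm_get_ref() runs ldlm_setup() and the last ldlm_put_ref() runs
 * ldlm_cleanup().  Typical usage from an obd setup/cleanup path is simply:
 *
 *      rc = ldlm_get_ref();
 *      ...
 *      ldlm_put_ref(force);
 */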
1058 int ldlm_get_ref(void)
1061 down(&ldlm_ref_sem);
1062 if (++ldlm_refcount == 1) {
1072 void ldlm_put_ref(int force)
1074 down(&ldlm_ref_sem);
1075 if (ldlm_refcount == 1) {
1076 int rc = ldlm_cleanup(force);
1078 CERROR("ldlm_cleanup failed: %d\n", rc);
1089 static int ldlm_setup(void)
1091 struct ldlm_bl_pool *blp;
1101 OBD_ALLOC(ldlm, sizeof(*ldlm));
1106 rc = ldlm_proc_setup();
1111 ldlm->ldlm_cb_service =
1112 ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
1113 LDLM_MAXREQSIZE, LDLM_CB_REQUEST_PORTAL,
1114 LDLM_CB_REPLY_PORTAL,
1115 ldlm_callback_handler, "ldlm_cbd",
1118 if (!ldlm->ldlm_cb_service) {
1119 CERROR("failed to start service\n");
1120 GOTO(out_proc, rc = -ENOMEM);
1123 ldlm->ldlm_cancel_service =
1124 ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
1125 LDLM_MAXREQSIZE, LDLM_CANCEL_REQUEST_PORTAL,
1126 LDLM_CANCEL_REPLY_PORTAL,
1127 ldlm_cancel_handler, "ldlm_canceld",
1130 if (!ldlm->ldlm_cancel_service) {
1131 CERROR("failed to start service\n");
1132 GOTO(out_proc, rc = -ENOMEM);
1135 OBD_ALLOC(blp, sizeof(*blp));
1137 GOTO(out_proc, rc = -ENOMEM);
1138 ldlm->ldlm_bl_pool = blp;
1140 atomic_set(&blp->blp_num_threads, 0);
1141 init_waitqueue_head(&blp->blp_waitq);
1142 spin_lock_init(&blp->blp_lock);
1144 INIT_LIST_HEAD(&blp->blp_list);
1147 for (i = 0; i < LDLM_NUM_THREADS; i++) {
1148 struct ldlm_bl_thread_data bltd = {
1152 init_completion(&blp->blp_comp);
1153 rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
1155 CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
1157 GOTO(out_thread, rc);
1159 wait_for_completion(&blp->blp_comp);
1162 rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cancel_service,
1163 LDLM_NUM_THREADS, "ldlm_cn");
1166 GOTO(out_thread, rc);
1169 rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cb_service,
1170 LDLM_NUM_THREADS, "ldlm_cb");
1173 GOTO(out_thread, rc);
1176 INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
1177 spin_lock_init(&expired_lock_thread.elt_lock);
1178 expired_lock_thread.elt_state = ELT_STOPPED;
1179 init_waitqueue_head(&expired_lock_thread.elt_waitq);
1181 rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
1183 CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
1184 GOTO(out_thread, rc);
1187 wait_event(expired_lock_thread.elt_waitq,
1188 expired_lock_thread.elt_state == ELT_READY);
1190 INIT_LIST_HEAD(&waiting_locks_list);
1191 spin_lock_init(&waiting_locks_spinlock);
1192 waiting_locks_timer.function = waiting_locks_callback;
1193 waiting_locks_timer.data = 0;
1194 init_timer(&waiting_locks_timer);
1201 ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
1202 ptlrpc_unregister_service(ldlm->ldlm_cb_service);
1207 ldlm_proc_cleanup();
1210 OBD_FREE(ldlm, sizeof(*ldlm));
1215 static int ldlm_cleanup(int force)
1218 struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
1222 if (!list_empty(&ldlm_namespace_list)) {
1223 CERROR("ldlm still has namespaces; clean these up first.\n");
1224 ldlm_dump_all_namespaces();
1229 while (atomic_read(&blp->blp_num_threads) > 0) {
1230 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
1232 init_completion(&blp->blp_comp);
1234 spin_lock(&blp->blp_lock);
1235 list_add_tail(&blwi.blwi_entry, &blp->blp_list);
1236 wake_up(&blp->blp_waitq);
1237 spin_unlock(&blp->blp_lock);
1239 wait_for_completion(&blp->blp_comp);
1241 OBD_FREE(blp, sizeof(*blp));
1243 ptlrpc_stop_all_threads(ldlm->ldlm_cb_service);
1244 ptlrpc_unregister_service(ldlm->ldlm_cb_service);
1245 ptlrpc_stop_all_threads(ldlm->ldlm_cancel_service);
1246 ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
1247 ldlm_proc_cleanup();
1249 expired_lock_thread.elt_state = ELT_TERMINATE;
1250 wake_up(&expired_lock_thread.elt_waitq);
1251 wait_event(expired_lock_thread.elt_waitq,
1252 expired_lock_thread.elt_state == ELT_STOPPED);
1256 OBD_FREE(ldlm, sizeof(*ldlm));
1262 int __init ldlm_init(void)
1264 ldlm_resource_slab = kmem_cache_create("ldlm_resources",
1265 sizeof(struct ldlm_resource), 0,
1266 SLAB_HWCACHE_ALIGN, NULL, NULL);
1267 if (ldlm_resource_slab == NULL)
1270 ldlm_lock_slab = kmem_cache_create("ldlm_locks",
1271 sizeof(struct ldlm_lock), 0,
1272 SLAB_HWCACHE_ALIGN, NULL, NULL);
1273 if (ldlm_lock_slab == NULL) {
1274 kmem_cache_destroy(ldlm_resource_slab);
1278 l_lock_init(&ldlm_handle_lock);
1283 void __exit ldlm_exit(void)
1285 if (ldlm_refcount)
1286 CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
1287 if (kmem_cache_destroy(ldlm_resource_slab) != 0)
1288 CERROR("couldn't free ldlm resource slab\n");
1289 if (kmem_cache_destroy(ldlm_lock_slab) != 0)
1290 CERROR("couldn't free ldlm lock slab\n");
1294 EXPORT_SYMBOL(ldlm_flock_completion_ast);
1297 EXPORT_SYMBOL(ldlm_lock2desc);
1298 EXPORT_SYMBOL(ldlm_register_intent);
1299 EXPORT_SYMBOL(ldlm_unregister_intent);
1300 EXPORT_SYMBOL(ldlm_lockname);
1301 EXPORT_SYMBOL(ldlm_typename);
1302 EXPORT_SYMBOL(ldlm_lock2handle);
1303 EXPORT_SYMBOL(__ldlm_handle2lock);
1304 EXPORT_SYMBOL(ldlm_lock_put);
1305 EXPORT_SYMBOL(ldlm_lock_match);
1306 EXPORT_SYMBOL(ldlm_lock_cancel);
1307 EXPORT_SYMBOL(ldlm_lock_addref);
1308 EXPORT_SYMBOL(ldlm_lock_decref);
1309 EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
1310 EXPORT_SYMBOL(ldlm_lock_change_resource);
1311 EXPORT_SYMBOL(ldlm_lock_set_data);
1312 EXPORT_SYMBOL(ldlm_it2str);
1313 EXPORT_SYMBOL(ldlm_lock_dump);
1314 EXPORT_SYMBOL(ldlm_lock_dump_handle);
1315 EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
1316 EXPORT_SYMBOL(ldlm_reprocess_all_ns);
1318 /* ldlm_request.c */
1319 EXPORT_SYMBOL(ldlm_completion_ast);
1320 EXPORT_SYMBOL(ldlm_expired_completion_wait);
1321 EXPORT_SYMBOL(ldlm_cli_convert);
1322 EXPORT_SYMBOL(ldlm_cli_enqueue);
1323 EXPORT_SYMBOL(ldlm_cli_cancel);
1324 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
1325 EXPORT_SYMBOL(ldlm_replay_locks);
1326 EXPORT_SYMBOL(ldlm_resource_foreach);
1327 EXPORT_SYMBOL(ldlm_namespace_foreach);
1328 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
1329 EXPORT_SYMBOL(ldlm_change_cbdata);
1332 EXPORT_SYMBOL(ldlm_server_blocking_ast);
1333 EXPORT_SYMBOL(ldlm_server_completion_ast);
1334 EXPORT_SYMBOL(ldlm_handle_enqueue);
1335 EXPORT_SYMBOL(ldlm_handle_cancel);
1336 EXPORT_SYMBOL(ldlm_handle_convert);
1337 EXPORT_SYMBOL(ldlm_del_waiting_lock);
1338 EXPORT_SYMBOL(ldlm_get_ref);
1339 EXPORT_SYMBOL(ldlm_put_ref);
1343 EXPORT_SYMBOL(ldlm_test);
1344 EXPORT_SYMBOL(ldlm_regression_start);
1345 EXPORT_SYMBOL(ldlm_regression_stop);
1348 /* ldlm_resource.c */
1349 EXPORT_SYMBOL(ldlm_namespace_new);
1350 EXPORT_SYMBOL(ldlm_namespace_cleanup);
1351 EXPORT_SYMBOL(ldlm_namespace_free);
1352 EXPORT_SYMBOL(ldlm_namespace_dump);
1355 EXPORT_SYMBOL(l_lock);
1356 EXPORT_SYMBOL(l_unlock);
1359 EXPORT_SYMBOL(client_obd_setup);
1360 EXPORT_SYMBOL(client_obd_cleanup);
1361 EXPORT_SYMBOL(client_connect_import);
1362 EXPORT_SYMBOL(client_disconnect_export);
1363 EXPORT_SYMBOL(target_abort_recovery);
1364 EXPORT_SYMBOL(target_handle_connect);
1365 EXPORT_SYMBOL(target_destroy_export);
1366 EXPORT_SYMBOL(target_cancel_recovery_timer);
1367 EXPORT_SYMBOL(target_send_reply);
1368 EXPORT_SYMBOL(target_queue_recovery_request);
1369 EXPORT_SYMBOL(target_handle_ping);
1370 EXPORT_SYMBOL(target_handle_disconnect);
1371 EXPORT_SYMBOL(target_queue_final_reply);
1372 EXPORT_SYMBOL(ldlm_put_lock_into_req);