/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"
#define l_flock_waitq   l_lru
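/*
 * Note: the define above aliases ldlm_lock::l_lru as the deadlock-detection
 * waitq linkage. This rests on the assumption that flock locks are never
 * placed on the namespace LRU list, so the field is otherwise unused here.
 */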
/**
 * Wait queue for Posix lock deadlock detection, added with
 * ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);

/**
 * Lock protecting access to ldlm_flock_waitq.
 */
spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);
/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 * and safeguard against removal of a list entry.
 * \param pos   the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * \param n     another &struct list_head to use as temporary storage
 * \param head  the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)
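/*
 * Example usage (an illustrative sketch only; 'head', 'pos', 'n' and the
 * must_drop() predicate are hypothetical, not part of this file). Unlike
 * list_for_each_safe(), iteration resumes *at* pos rather than at the
 * first entry, and pos may be safely unlinked inside the body:
 *
 *      struct list_head *pos = head->next;
 *      struct list_head *n;
 *
 *      list_for_remaining_safe(pos, n, head) {
 *              struct ldlm_lock *lck =
 *                      list_entry(pos, struct ldlm_lock, l_res_link);
 *              if (must_drop(lck))
 *                      list_del_init(&lck->l_res_link);
 *      }
 */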
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.pid ==
                lock->l_policy_data.l_flock.pid) &&
               (new->l_export == lock->l_export));
}
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}
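/*
 * A worked example of the overlap test above (values illustrative only):
 * an existing lock on [0, 9] and a new request for [9, 20] overlap, since
 * 9 <= 9 and 20 >= 0; a new request for [10, 20] does not, since 10 > 9.
 * The ranges are inclusive at both ends.
 */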
static void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(list_empty(&lock->l_flock_waitq));

        list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when reaching here, it is under lock_res_and_lock(), so we
                 * must call the nolock version of ldlm_lock_decref_internal */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}
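/*
 * Check whether granting the lock that blocks 'req' would close a wait-for
 * cycle. Starting from blocking_lock's owner, follow the chain of recorded
 * blockers through the wait queue; if the chain leads back to the owner of
 * 'req', report deadlock. For example (illustrative): process P1 waits on a
 * range held by P2 while P2's own queued request is blocked by a range P1
 * already holds - the chain P1 -> P2 -> P1 closes, so 'req' must fail
 * rather than be enqueued.
 */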
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        pid_t req_pid = req->l_policy_data.l_flock.pid;
        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
        struct ldlm_lock *lock;

        spin_lock(&ldlm_flock_waitq_lock);
restart:
        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
                blocking_export = (struct obd_export *)(long)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_pid == req_pid && blocking_export == req_export) {
                        spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = res->lr_namespace;
        struct list_head *tmp;
        struct list_head *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;
        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);
                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }
                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }
                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        req->l_policy_data.l_flock.blocking_pid =
                                lock->l_policy_data.l_flock.pid;
                        req->l_policy_data.l_flock.blocking_export =
                                (long)(void *)lock->l_export;

                        LASSERT(list_empty(&req->l_flock_waitq));
                        spin_lock(&ldlm_flock_waitq_lock);
                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
                        spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }
        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }
        /* In case we had slept on this lock request, take it off the
         * deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&req->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);
        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */
        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;
                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
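                        /* For example (illustrative values only): an existing
                         * lock on [0, 4] adjoins a new request for [5, 9],
                         * since 5 == 4 + 1, and the two merge into [0, 9].
                         * The end != OBD_OBJECT_EOF and start != 0 guards
                         * below keep "end + 1" and "start - 1" from wrapping
                         * at the extremes of the __u64 range. */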
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }
                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;
                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }
                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request, but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */
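                /* For example (illustrative values only): if this process
                 * holds a PW lock on [0, 100] and unlocks [40, 60], the
                 * single granted lock must become two: [0, 39] kept in a
                 * newly allocated lock (new2) and [61, 100] kept in the
                 * existing one. */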
                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the ns_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                                lock->l_granted_mode, &null_cbs,
                                                NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;
                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export, new2);
                        if (new2->l_export->exp_lock_hash &&
                            hlist_unhashed(&new2->l_exp_hash))
                                lustre_hash_add(new2->l_export->exp_lock_hash,
                                                &new2->l_remote_handle,
                                                &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }
        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);
        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;
        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }
        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(&rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }
        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above so that it
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}
struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};
static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        struct lustre_handle lockh;
        int rc;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        ldlm_lock_decref_internal(lock, lock->l_req_mode);
        ldlm_lock2handle(lock, &lockh);
        rc = ldlm_cli_cancel(&lockh);
        if (rc)
                CERROR("ldlm_cli_cancel: %d\n", rc);
        EXIT;
}
/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags [in]: flags
 * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t *getlk = lock->l_ast_data;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct ldlm_flock_wait_data fwd;
        struct l_wait_info lwi;
        ldlm_error_t err;
        int rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);
        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        lock_res_and_lock(lock);
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                unlock_res_and_lock(lock);
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    NULL == data)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
                RETURN(0);
        }
        unlock_res_and_lock(lock);
        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (NULL == data)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock get granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }
        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
        lock_res_and_lock(lock);
        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                unlock_res(lock->l_resource);
                RETURN(-EIO);
        }
        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                unlock_res_and_lock(lock);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");
        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        list_del_init(&lock->l_res_link);
        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the
                 * mode in the lock changes we can decref the appropriate
                 * refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = lock->l_resource->lr_namespace;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}