/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"
#define l_flock_waitq   l_lru

/**
 * Wait queue for POSIX lock deadlock detection, added with
 * ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);
/**
 * Lock protecting access to ldlm_flock_waitq.
 */
cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);
/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 * and safeguard against removal of a list entry.
 * \param pos   the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * \param n     another &struct list_head to use as temporary storage
 * \param head  the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)
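/*
 * Illustrative sketch only (not part of the original source): because "n"
 * always holds the successor of "pos", the entry at "pos" may be unlinked
 * from inside the loop body without breaking the iteration. The names
 * "my_entry", "me_link", "me_stale" and "entry_list" are hypothetical.
 *
 *      cfs_list_t *pos = entry_list.next;      // must be initialized first
 *      cfs_list_t *n;
 *
 *      list_for_remaining_safe(pos, n, &entry_list) {
 *              struct my_entry *e = cfs_list_entry(pos, struct my_entry,
 *                                                  me_link);
 *              if (e->me_stale)
 *                      cfs_list_del_init(pos); // safe: "n" is untouched
 *      }
 */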
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.owner ==
                lock->l_policy_data.l_flock.owner) &&
               (new->l_export == lock->l_export));
}
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}
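/*
 * Worked example (editorial note, not in the original source): flock
 * extents are inclusive byte ranges, so [0, 4] and [5, 9] do NOT overlap
 * (the first test fails since 4 < 5), while [0, 5] and [5, 9] do. A
 * whole-file lock spans [0, OBD_OBJECT_EOF] and overlaps every extent.
 */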
static void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_list_empty(&lock->l_flock_waitq));

        cfs_list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when reaching here, it is under lock_res_and_lock(). Thus,
                 * we need to call the nolock version of
                 * ldlm_lock_decref_internal() */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        __u64 req_owner = req->l_policy_data.l_flock.owner;
        __u64 blocking_owner = blocking_lock->l_policy_data.l_flock.owner;
        struct ldlm_lock *lock;

        cfs_spin_lock(&ldlm_flock_waitq_lock);
restart:
        cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.owner != blocking_owner) ||
                    (lock->l_export != blocking_export))
                        continue;

                /* Follow the blocked-on chain one step further, then rescan
                 * the waitq from the top with the new (owner, export). */
                blocking_owner = lock->l_policy_data.l_flock.blocking_owner;
                blocking_export = (struct obd_export *)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_owner == req_owner &&
                    blocking_export == req_export) {
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}
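/*
 * Editorial illustration (not in the original source): with two owners,
 * O1 holding lock A while waiting on B, and O2 holding B while waiting
 * on A, a new request from O1 walks the chain O1 -> blocked on O2 ->
 * blocked on O1 == requesting owner, so the function returns 1 and the
 * enqueue fails with -EDEADLK instead of blocking forever.
 */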
static int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, cfs_list_t *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
        cfs_list_t *tmp;
        cfs_list_t *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x owner "LPU64" pid %u mode %u start "
               LPU64" end "LPU64"\n", *flags, new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);
        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * POSIX file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);
                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }
                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        req->l_policy_data.l_flock.blocking_owner =
                                lock->l_policy_data.l_flock.owner;
                        req->l_policy_data.l_flock.blocking_export =
                                lock->l_export;

                        LASSERT(cfs_list_empty(&req->l_flock_waitq));
                        cfs_spin_lock(&ldlm_flock_waitq_lock);
                        cfs_list_add_tail(&req->l_flock_waitq,
                                          &ldlm_flock_waitq);
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }
        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }
        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&req->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */
        if (!ownlocks)
                ownlocks = &res->lr_granted;
        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }
                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }
                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the lr_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                                lock->l_granted_mode, &null_cbs,
                                                NULL, 0);
                        lock_res_and_lock(req);
                        if (new2 == NULL) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }
                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.owner =
                        new->l_policy_data.l_flock.owner;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export,
                                                               new2);
                        if (new2->l_export->exp_lock_hash &&
                            cfs_hlist_unhashed(&new2->l_exp_hash))
                                cfs_hash_add(new2->l_export->exp_lock_hash,
                                             &new2->l_remote_handle,
                                             &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }
        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                cfs_list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }
        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(&rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }
        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_ast_work_item() above so that lawi()
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}
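/*
 * Editorial illustration (not in the original source): if a process holds
 * a PW lock on [0, 99] and enqueues an F_UNLCK (LCK_NL) request for
 * [40, 49], the scan above splits the granted lock: "new2" takes over the
 * low part [0, 39] and the existing lock is trimmed to [50, 99], leaving
 * a hole that exactly matches the unlocked range.
 */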
struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};
static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        EXIT;
}
/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags [in]: flags
 * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t                    *getlk = lock->l_ast_data;
        struct obd_device              *obd;
        struct obd_import              *imp = NULL;
        struct ldlm_flock_wait_data     fwd;
        struct l_wait_info              lwi;
        ldlm_error_t                    err;
        int                             rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);
        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    data == NULL)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);

                /* Need to wake up the waiter if we were evicted */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }
        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (data == NULL)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock gets granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }
        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        if (imp != NULL) {
                cfs_spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                cfs_spin_unlock(&imp->imp_lock);
        }
        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");
        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        lock_res_and_lock(lock);
        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        cfs_list_del_init(&lock->l_res_link);
        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the
                 * mode in the lock changes we can decref the appropriate
                 * refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
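/*
 * Editorial illustration (not in the original source): the
 * LDLM_FL_TEST_LOCK branch above services an ordinary userspace
 * fcntl(F_GETLK) probe, e.g.:
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,    // mode we would like to take
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 0,          // 0 means "to EOF"
 *      };
 *      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *              printf("conflict: pid %d holds a lock at %jd\n",
 *                     (int)fl.l_pid, (intmax_t)fl.l_start);
 *
 * The cfs_flock_set_*() calls copy the conflicting lock's type, pid and
 * extent back into that struct flock for the caller to inspect.
 */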
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = ldlm_lock_to_ns(lock);

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}
void ldlm_flock_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                     ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
        /* Compat code: old clients had no idea about the owner field and
         * relied solely on pid for ownership. Introduced in 2.1, April 2011. */
        if (!lpolicy->l_flock.owner)
                lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}
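/*
 * Editorial illustration (not in the original source): a pre-2.1 client
 * sends lfw_owner == 0 and lfw_pid == 1234; the fallback above sets the
 * local policy's owner to 1234, so ldlm_same_flock_owner() still works
 * against locks from newer clients that fill in a real owner value.
 */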
void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                     ldlm_wire_policy_data_t *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}