/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
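
/*
 * POSIX file & record lock (flock) support for the Lustre distributed lock
 * manager: conflict checking, deadlock detection, and the merging and
 * splitting of byte-range locks held by a single owner.
 */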

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

#define l_flock_waitq   l_lru

static struct list_head ldlm_flock_waitq = CFS_LIST_HEAD_INIT(ldlm_flock_waitq);
spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
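
/*
 * Note: l_flock_waitq above is #defined to reuse the l_lru linkage of a
 * lock; this is presumably safe because a blocked flock lock is never put
 * on the LRU. Blocked requests are queued here, under
 * ldlm_flock_waitq_lock, so that ldlm_flock_deadlock() can walk them.
 */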

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 * and safeguard against removal of a list entry.
 * @pos:  the &struct list_head to use as a loop counter. pos MUST
 *        have been initialized prior to using it in this macro.
 * @n:    another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)
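
/*
 * Flock owner identity: two locks belong to the same owner iff they carry
 * the same process id and arrived over the same export (client connection).
 */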

static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.pid ==
                lock->l_policy_data.l_flock.pid) &&
               (new->l_export == lock->l_export));
}

static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}
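
/*
 * The overlap test above is the usual closed-interval intersection:
 * [s1, e1] and [s2, e2] intersect iff s2 <= e1 && e2 >= s1. For example,
 * the (hypothetical) ranges [0, 5] and [5, 10] overlap at offset 5, while
 * [0, 4] and [5, 10] do not.
 */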

static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(list_empty(&lock->l_flock_waitq));

        list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when reaching here, it is under lock_res_and_lock(); thus we
                 * need to call the nolock version of ldlm_lock_decref_internal */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}
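
/*
 * Deadlock detection: starting from the owner that directly blocks the new
 * request, follow the blocking_pid/blocking_export links recorded on each
 * waiting lock. If the chain ever leads back to the requesting owner, the
 * new wait would close a cycle. For example, if process P1 would wait on a
 * lock held by P2 while one of P2's requests already waits on a lock held
 * by P1, letting P1 block would deadlock both processes.
 */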

static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        pid_t req_pid = req->l_policy_data.l_flock.pid;
        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
        struct ldlm_lock *lock;

        spin_lock(&ldlm_flock_waitq_lock);
restart:
        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
                blocking_export = (struct obd_export *)(long)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_pid == req_pid && blocking_export == req_export) {
                        spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}
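
/**
 * Process a request for a flock lock against the granted and waiting lists
 * of its resource.
 *
 * Depending on *flags, a conflicting lock causes the request to block
 * (after a deadlock check), to fail immediately (LDLM_FL_BLOCK_NOWAIT),
 * or to report the conflicting lock (LDLM_FL_TEST_LOCK). When nothing
 * conflicts, the request is merged with, or split against, the owner's
 * existing granted locks and then granted itself.
 */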
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = res->lr_namespace;
        struct list_head *tmp;
        struct list_head *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_pid =
                                lock->l_policy_data.l_flock.pid;
                        req->l_policy_data.l_flock.blocking_export =
                                (long)(void *)lock->l_export;

                        LASSERT(list_empty(&req->l_flock_waitq));
                        spin_lock(&ldlm_flock_waitq_lock);
                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
                        spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&req->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                continue;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the ns_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, NULL, NULL, NULL,
                                        NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_get(lock->l_export);
                        if (new2->l_export->exp_lock_hash &&
                            hlist_unhashed(&new2->l_exp_hash))
                                lustre_hash_add(new2->l_export->exp_lock_hash,
                                                &new2->l_remote_handle,
                                                &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_PUT(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                struct list_head rpc_list
                                        = CFS_LIST_HEAD_INIT(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_cp_ast_work(&rpc_list);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_ast_work_item() above so that lawi()
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}
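
/*
 * Context handed to ldlm_flock_interrupted_wait() below: the blocked lock
 * itself plus the import generation sampled when the wait began
 * (presumably recorded so that an interrupted wait can be correlated with
 * a reconnect that happened while the lock was blocked).
 */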
struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        struct lustre_handle lockh;
        int rc;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        ldlm_lock_decref_internal(lock, lock->l_req_mode);
        ldlm_lock2handle(lock, &lockh);
        rc = ldlm_cli_cancel(&lockh);
        if (rc)
                CERROR("ldlm_cli_cancel: %d\n", rc);
        EXIT;
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags [in]: flags
 * \param *data [in]: ldlm_run_cp_ast_work() will use ldlm_cb_set_arg
 *
 * \retval 0 : success
 * \retval <0 : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t *getlk = lock->l_ast_data;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct ldlm_flock_wait_data fwd;
        struct l_wait_info lwi;
        ldlm_error_t err;
        int rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        lock_res_and_lock(lock);
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                unlock_res_and_lock(lock);
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    data == NULL)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
                RETURN(0);
        }
        unlock_res_and_lock(lock);

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (data == NULL)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock get granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

        lock_res_and_lock(lock);
        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                unlock_res_and_lock(lock);
                RETURN(-EIO);
        }
        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                unlock_res_and_lock(lock);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the mode
                 * in the lock changes we can decref the appropriate refcount.*/
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
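
/*
 * Server-side blocking AST for flock locks, invoked only at cancel time
 * (LDLM_CB_CANCELING): its sole job is to make sure a cancelled lock is
 * removed from the deadlock detection waitq.
 */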
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = lock->l_resource->lr_namespace;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}