/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

#define l_flock_waitq   l_lru

/**
 * Wait queue for Posix lock deadlock detection, added with
 * ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);
/**
 * Lock protecting access to ldlm_flock_waitq.
 */
spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * @pos:        the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * @n:          another &struct list_head to use as temporary storage
 * @head:       the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)

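/*
 * Unlike list_for_each_safe(), iteration starts at the current value of
 * @pos itself rather than at @head->next, so a scan can resume from a
 * saved cursor. Illustrative sketch (hypothetical variable names):
 *
 *      pos = saved_cursor;
 *      list_for_remaining_safe(pos, n, &res->lr_granted) {
 *              lock = list_entry(pos, struct ldlm_lock, l_res_link);
 *              ... pos may be removed here; n already holds its successor ...
 *      }
 */
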
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.pid ==
                lock->l_policy_data.l_flock.pid) &&
               (new->l_export == lock->l_export));
}

static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

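/* Flock extents are inclusive at both ends, so [0, 9] and [9, 20] overlap
 * while [0, 9] and [10, 20] merely adjoin; adjoining locks of equal mode
 * are merged in ldlm_process_flock_lock() below. */
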
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(list_empty(&lock->l_flock_waitq));

        list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
                ldlm_lock_decref_internal(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

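/*
 * Deadlock detection: starting from the lock that blocks "req", follow the
 * chain of waiting owners through ldlm_flock_waitq (each waiting lock
 * records the pid and export it is blocked on). If the chain leads back to
 * the owner of "req" itself (process A waits on B while B already waits on
 * A), then granting the request would create a cycle and it must fail
 * instead.
 */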
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        pid_t req_pid = req->l_policy_data.l_flock.pid;
        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
        struct ldlm_lock *lock;

        spin_lock(&ldlm_flock_waitq_lock);
restart:
        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
                blocking_export = (struct obd_export *)(long)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_pid == req_pid && blocking_export == req_export) {
                        spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}

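/*
 * Process a Posix file/record lock request against the granted and waiting
 * queues of its resource; called with the resource locked. The behaviour
 * depends on the flags passed in: LDLM_FL_TEST_LOCK only probes for a
 * conflicting lock (fcntl(F_GETLK)), LDLM_FL_BLOCK_NOWAIT fails with
 * -EAGAIN instead of waiting, and LDLM_FL_WAIT_NOREPROC reprocesses an
 * already-granted lock so it can be merged or split against the other
 * locks held by the same process.
 */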
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = res->lr_namespace;
        struct list_head *tmp;
        struct list_head *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_pid =
                                lock->l_policy_data.l_flock.pid;
                        req->l_policy_data.l_flock.blocking_export =
                                (long)(void *)lock->l_export;

                        LASSERT(list_empty(&req->l_flock_waitq));
                        spin_lock(&ldlm_flock_waitq_lock);
                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
                        spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&req->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                continue;
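
                        /* Worked example: an existing lock on [0, 99] and a
                         * new request for [100, 199] adjoin (100 == 99 + 1)
                         * and are merged below. The checks against
                         * OBD_OBJECT_EOF and 0 keep "end + 1" and
                         * "start - 1" from wrapping at the limits of the
                         * extent range. */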
                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

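                /* Illustrative example: an existing PW lock on [0, 199]
                 * overlapped in the middle by a new PR request for [50, 99]
                 * is split so that new2 below keeps [0, 49], the existing
                 * lock shrinks to [100, 199], and the new lock covers
                 * [50, 99]. */
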
                /* XXX - if ldlm_lock_create() can sleep we should
                 * release the ns_lock, allocate the new lock,
                 * and restart processing this lock. */
                new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                lock->l_granted_mode, NULL, NULL, NULL,
                                NULL, 0);
                if (!new2) {
                        ldlm_flock_destroy(req, lock->l_granted_mode, *flags);
                        *err = -ENOLCK;
                        RETURN(LDLM_ITER_STOP);
                }

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_get(lock->l_export);
                        spin_lock(&new2->l_export->exp_ldlm_data.led_lock);
                        list_add(&new2->l_export_chain,
                                 &new2->l_export->exp_ldlm_data.led_held_locks);
                        spin_unlock(&new2->l_export->exp_ldlm_data.led_lock);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal(new2, lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                break;
        }

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

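        /* When "added" is set the request was folded into an existing
         * granted lock above, so req no longer represents a range of its
         * own; it is destroyed at the bottom of this function rather than
         * being queued here. */
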
        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res(res);
                                rc = ldlm_run_ast_work(&rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res(res);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_ast_work_item() above so that lawi()
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_OTHER, res);
        RETURN(LDLM_ITER_CONTINUE);
}

struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        struct lustre_handle lockh;
        int rc;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        ldlm_lock_decref_internal(lock, lock->l_req_mode);
        ldlm_lock2handle(lock, &lockh);
        rc = ldlm_cli_cancel(&lockh);
        if (rc < 0)
                CERROR("ldlm_cli_cancel: %d\n", rc);
        EXIT;
}

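/*
 * Client-side completion handler for flock enqueues. If the server could
 * not grant the lock immediately, sleep until the completion AST arrives;
 * a signal during the wait cancels the request via
 * ldlm_flock_interrupted_wait(). Once granted, either report the
 * conflicting lock back through getlk (F_GETLK) or reprocess the granted
 * queue to merge or split against this process's other locks.
 */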
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_namespace *ns;
        cfs_flock_t *getlk = lock->l_ast_data;
        struct ldlm_flock_wait_data fwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        ldlm_error_t err;
        int rc = 0;
        struct l_wait_info lwi;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        lock_res_and_lock(lock);
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                unlock_res_and_lock(lock);
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
                RETURN(0);
        }
        unlock_res_and_lock(lock);

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV)))
                goto granted;

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           lock->l_destroyed), &lwi);

        LDLM_DEBUG(lock, "client-side enqueue waking up: rc = %d", rc);
        RETURN(rc);

granted:
        /* before the flock's completion AST gets here, the flock
         * can possibly be freed by another thread
         */
        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "already destroyed by another thread");
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");
        ns = lock->l_resource->lr_namespace;
        lock_res(lock->l_resource);

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the
                 * mode in the lock changes we can decref the appropriate
                 * refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
                if (flags == 0)
                        cfs_waitq_signal(&lock->l_waitq);
        }
        unlock_res(lock->l_resource);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

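/*
 * Blocking AST for flock locks, installed on the server side in
 * ldlm_process_flock_lock(). It is only invoked at cancellation time
 * (LDLM_CB_CANCELING) and its sole job is to take the lock off the
 * deadlock-detection waitq.
 */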
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = lock->l_resource->lr_namespace;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}