/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2003 Hewlett-Packard Development Company LP.
 *   Developed under the sponsorship of the US Government under
 *   Subcontract No. B514193
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <linux/lustre_dlm.h>
#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <linux/obd_class.h>
#endif

#include "ldlm_internal.h"

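/* A lock that is waiting on a conflicting flock is also linked onto the
 * global ldlm_flock_waitq below for deadlock detection.  Client flock locks
 * are kept off the LRU (LDLM_FL_CBPENDING is set for them), so the otherwise
 * idle l_lru list_head is reused for that linkage. */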
#define l_flock_waitq   l_lru

static struct list_head ldlm_flock_waitq = LIST_HEAD_INIT(ldlm_flock_waitq);

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * @pos:        the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * @n:          another &struct list_head to use as temporary storage
 * @head:       the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)
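/* Note: unlike list_for_each_safe(), this does not restart from head->next;
 * iteration resumes from whatever position the caller left in 'pos'.  The
 * merge/split scan below relies on this to begin at the first lock owned by
 * the requesting process rather than at the front of lr_granted. */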

static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.pid ==
                lock->l_policy_data.l_flock.pid) &&
               (new->l_export == lock->l_export));
}

static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        LASSERT(list_empty(&lock->l_flock_waitq));

        list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
                ldlm_lock_decref_internal(lock, mode);
        }

        ldlm_lock_destroy(lock);
        EXIT;
}

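/* Deadlock detection: starting from the owner that blocks 'req', follow the
 * chain of blocked owners recorded on ldlm_flock_waitq (each waiting lock
 * remembers the pid/export it is blocked behind).  If the chain leads back
 * to the owner of 'req', granting the wait would create a cycle, so the
 * enqueue is failed instead of blocked. */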
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        pid_t req_pid = req->l_policy_data.l_flock.pid;
        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
        struct ldlm_lock *lock;

restart:
        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
                blocking_export = (struct obd_export *)(long)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_pid == req_pid && blocking_export == req_export)
                        return 1;

                goto restart;
        }

        return 0;
}

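/* Core policy function for LDLM_FLOCK resources, called both for the initial
 * enqueue of a flock request and when the resource is reprocessed.  It checks
 * the granted queue for conflicts, performs deadlock detection, queues the
 * request on lr_waiting when it must block, and otherwise merges the new
 * range with (or splits) the existing locks of the same owner, POSIX-style.
 * The LDLM_ITER_* return value drives the reprocess iterator. */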
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = res->lr_namespace;
        struct list_head *tmp;
        struct list_head *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns->ns_client;
        int added = (mode == LCK_NL);
        int overlaps = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_pid =
                                lock->l_policy_data.l_flock.pid;
                        req->l_policy_data.l_flock.blocking_export =
                                (long)(void *)lock->l_export;

                        LASSERT(list_empty(&req->l_flock_waitq));
                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

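        /* If this was an fcntl(F_GETLK) probe and no conflicting lock was
         * found above, report "unlocked" by switching the request to LCK_NL
         * and let the caller clean it up. */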
        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        list_del_init(&req->l_flock_waitq);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
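                        /* Example: with an existing lock covering [0, 99] and
                         * a new request for [100, 200], start == end + 1, so
                         * the ranges adjoin and are coalesced below.  The
                         * "end != ~0" / "start != 0" guards keep the +1/-1
                         * arithmetic from wrapping when a lock already runs
                         * to the end of the range or starts at offset 0. */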
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end != ~0))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the ns_lock, allocate the new lock,
                 * and restart processing this lock. */
                new2 = ldlm_lock_create(ns, NULL, res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, NULL, NULL, NULL,
                                        NULL, 0);
                if (!new2) {
                        ldlm_flock_destroy(req, lock->l_granted_mode, *flags);
                        *err = -ENOLCK;
                        RETURN(LDLM_ITER_STOP);
                }

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_get(lock->l_export);
                        list_add(&new2->l_export_chain,
                                 &new2->l_export->exp_ldlm_data.led_held_locks);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal(new2, lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                break;
        }

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                struct list_head rpc_list
                                                    = LIST_HEAD_INIT(rpc_list);
                                int rc;
restart:
                                res->lr_tmp = &rpc_list;
                                ldlm_reprocess_queue(res, &res->lr_waiting);
                                res->lr_tmp = NULL;

                                l_unlock(&ns->ns_lock);
                                rc = ldlm_run_ast_work(res->lr_namespace,
                                                       &rpc_list);
                                l_lock(&ns->ns_lock);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, NULL, 0);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_ast_work_item() above so that lawi()
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_OTHER, res);
        RETURN(LDLM_ITER_CONTINUE);
}

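/* State handed to ldlm_flock_interrupted_wait() while a client thread sleeps
 * in ldlm_flock_completion_ast(), so the wait can be cleaned up and the lock
 * cancelled if the process is signalled. */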
struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        struct lustre_handle lockh;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        list_del_init(&lock->l_flock_waitq);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        ldlm_lock_decref_internal(lock, lock->l_req_mode);
        ldlm_lock2handle(lock, &lockh);
        ldlm_cli_cancel(&lockh);
        EXIT;
}

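/* Client-side completion callback for flock enqueues.  If the server reports
 * the request as blocked, sleep until the lock is granted (or the wait is
 * interrupted), then either fill in the fcntl(F_GETLK) result or reprocess
 * the granted lock so it is merged with, or split against, this process's
 * existing locks. */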
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_namespace *ns;
        struct file_lock *getlk = lock->l_ast_data;
        struct ldlm_flock_wait_data fwd;
        unsigned long irqflags;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        ldlm_error_t err;
        int rc = 0;
        struct l_wait_info lwi;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV)))
                goto granted;

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");

        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        if (imp != NULL) {
                spin_lock_irqsave(&imp->imp_lock, irqflags);
                fwd.fwd_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, irqflags);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           lock->l_destroyed), &lwi);

        LDLM_DEBUG(lock, "client-side enqueue waking up: rc = %d", rc);
        if (rc)
                RETURN(rc);

granted:
        LDLM_DEBUG(lock, "client-side enqueue granted");
        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);

        /* take lock off the deadlock detection waitq. */
        list_del_init(&lock->l_flock_waitq);

        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the mode
                 * in the lock changes we can decref the appropriate refcount. */
                ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        getlk->fl_type = F_RDLCK;
                        break;
                case LCK_PW:
                        getlk->fl_type = F_WRLCK;
                        break;
                default:
                        getlk->fl_type = F_UNLCK;
                }
                getlk->fl_pid = lock->l_policy_data.l_flock.pid;
                getlk->fl_start = lock->l_policy_data.l_flock.start;
                getlk->fl_end = lock->l_policy_data.l_flock.end;
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err);
                if (flags == 0)
                        wake_up(&lock->l_waitq);
        }
        l_unlock(&ns->ns_lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

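/* Blocking/cancel callback for flock locks.  Clients never receive blocking
 * ASTs for these locks (see ldlm_process_flock_lock()); the only work to do
 * when one is cancelled is to unlink it from the deadlock-detection waitq. */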
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = lock->l_resource->lr_namespace;

        /* take lock off the deadlock detection waitq. */
        l_lock(&ns->ns_lock);
        list_del_init(&lock->l_flock_waitq);
        l_unlock(&ns->ns_lock);
        RETURN(0);
}