/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 *	Developed under the sponsorship of the US Government under
 *	Subcontract No. B514193
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

/**
 * This file implements the POSIX lock type for Lustre.
 * Its policy properties are the start and end of the extent, and the PID.
 *
 * These locks are only done through the MDS due to POSIX semantics
 * requiring e.g. that a lock may be only partially released and as such
 * split into two parts, and that two adjacent locks from the same process
 * may be merged into a single wider lock.
 *
 * Lock modes are mapped as follows:
 *   PR and PW for READ and WRITE locks
 *   NL to request the release of a portion of a lock
 *
 * These flock locks never time out.
 */
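/*
 * Illustration (not from this file): a client request such as
 * fcntl(fd, F_SETLKW, &fl) with fl.l_type == F_WRLCK is enqueued as an
 * LCK_PW flock lock over [fl.l_start, fl.l_end]; F_RDLCK maps to LCK_PR
 * and F_UNLCK to LCK_NL, which releases (part of) an existing lock.
 */
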
#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/list.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

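/*
 * Locks are considered to have the same owner iff the owner cookies
 * match and both locks were created through the same export, i.e. they
 * come from the same process on the same client.
 */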
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return ((new->l_policy_data.l_flock.owner ==
                 lock->l_policy_data.l_flock.owner) &&
                (new->l_export == lock->l_export));
}

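/*
 * Flock extents are inclusive at both ends, so two locks overlap iff
 * each one starts at or before the point where the other one ends.
 */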
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return ((new->l_policy_data.l_flock.start <=
                 lock->l_policy_data.l_flock.end) &&
                (new->l_policy_data.l_flock.end >=
                 lock->l_policy_data.l_flock.start));
}

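/*
 * Record which owner a blocked request is waiting on, so that
 * ldlm_flock_deadlock() can follow these links when it walks the wait
 * graph; the request is also added to the per-export flock hash.
 */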
static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
                                            struct ldlm_lock *lock)
{
        /* For server only */
        if (req->l_export == NULL)
                return;

        LASSERT(hlist_unhashed(&req->l_exp_flock_hash));

        req->l_policy_data.l_flock.blocking_owner =
                lock->l_policy_data.l_flock.owner;
        req->l_policy_data.l_flock.blocking_export =
                lock->l_export;
        atomic_set(&req->l_policy_data.l_flock.blocking_refs, 0);

        cfs_hash_add(req->l_export->exp_flock_hash,
                     &req->l_policy_data.l_flock.owner,
                     &req->l_exp_flock_hash);
}

static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
{
        /* For server only */
        if (req->l_export == NULL)
                return;

        check_res_locked(req->l_resource);
        if (req->l_export->exp_flock_hash != NULL &&
            !hlist_unhashed(&req->l_exp_flock_hash))
                cfs_hash_del(req->l_export->exp_flock_hash,
                             &req->l_policy_data.l_flock.owner,
                             &req->l_exp_flock_hash);
}

static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: %#llx)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));

        list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when reaching here, it is under lock_res_and_lock(). Thus,
                 * we need to call the nolock version of
                 * ldlm_lock_decref_internal()
                 */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

#ifdef HAVE_SERVER_SUPPORT
/**
 * POSIX locks deadlock detection code.
 *
 * Given a new lock \a req and an existing lock \a bl_lock that it
 * conflicts with, iterate through all blocked POSIX locks for this export
 * and check whether a deadlock condition arises (i.e. when one client
 * holds a lock on something and wants a lock on something else, while at
 * the same time another client has the opposite situation).
 */
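/*
 * Example (illustrative): process A on one client holds a lock on
 * extent [0, 99] and waits for [100, 199], while process B on another
 * client holds [100, 199] and waits for [0, 99]. Walking the
 * blocking_owner/blocking_export chain from the new waiter leads back
 * to the waiter itself, which is reported as a deadlock.
 */
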
struct ldlm_flock_lookup_cb_data {
        __u64 *bl_owner;
        struct ldlm_lock *lock;
        struct obd_export *exp;
};

static int ldlm_flock_lookup_cb(struct obd_export *exp, void *data)
{
        struct ldlm_flock_lookup_cb_data *cb_data = data;
        struct ldlm_lock *lock;

        if (exp->exp_flock_hash == NULL)
                return 0;

        lock = cfs_hash_lookup(exp->exp_flock_hash, cb_data->bl_owner);
        if (lock == NULL)
                return 0;

        /* Stop on first found lock. Same process can't sleep twice */
        cb_data->lock = lock;
        cb_data->exp = class_export_get(exp);

        return 1;
}

static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
{
        struct obd_export *req_exp = req->l_export;
        struct obd_export *bl_exp = bl_lock->l_export;
        __u64 req_owner = req->l_policy_data.l_flock.owner;
        __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;

        /* For server only */
        if (req_exp == NULL)
                return 0;

        class_export_get(bl_exp);
        while (1) {
                struct ldlm_flock_lookup_cb_data cb_data = {
                        .bl_owner = &bl_owner,
                        .lock = NULL,
                        .exp = NULL };
                struct ptlrpc_connection *bl_exp_conn;
                struct obd_export *bl_exp_new;
                struct ldlm_lock *lock = NULL;
                struct ldlm_flock *flock;

                bl_exp_conn = bl_exp->exp_connection;
                if (bl_exp->exp_flock_hash != NULL) {
                        int found;

                        found = obd_nid_export_for_each(bl_exp->exp_obd,
                                                        &bl_exp_conn->c_peer.nid,
                                                        ldlm_flock_lookup_cb,
                                                        &cb_data);
                        if (found)
                                lock = cb_data.lock;
                }
                if (lock == NULL)
                        break;

                class_export_put(bl_exp);
                bl_exp = cb_data.exp;

                LASSERT(req != lock);
                flock = &lock->l_policy_data.l_flock;
                LASSERT(flock->owner == bl_owner);
                bl_owner = flock->blocking_owner;
                bl_exp_new = class_export_get(flock->blocking_export);
                class_export_put(bl_exp);

                cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
                bl_exp = bl_exp_new;

                if (bl_exp->exp_failed)
                        break;

                if (bl_owner == req_owner &&
                    nid_same(&bl_exp_conn->c_peer.nid,
                             &req_exp->exp_connection->c_peer.nid)) {
                        class_export_put(bl_exp);
                        return 1;
                }
        }
        class_export_put(bl_exp);

        return 0;
}

static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
                                          struct list_head *work_list)
{
        CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);

        if ((exp_connect_flags(lock->l_export) &
             OBD_CONNECT_FLOCK_DEAD) == 0) {
                CERROR("deadlock found, but client doesn't support flock cancellation\n");
        } else {
                LASSERT(lock->l_completion_ast);
                LASSERT(!ldlm_is_ast_sent(lock));
                lock->l_flags |= (LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
                                  LDLM_FL_FLOCK_DEADLOCK);
                ldlm_flock_blocking_unlink(lock);
                ldlm_resource_unlink_lock(lock);
                ldlm_add_ast_work_item(lock, NULL, work_list);
        }
}
#endif /* HAVE_SERVER_SUPPORT */

/**
 * Process a granting attempt for a flock lock.
 * Must be called with the ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 */
static int
ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
                        enum ldlm_process_intention intention,
                        enum ldlm_error *err, struct list_head *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
        struct ldlm_lock *tmp;
        struct ldlm_lock *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        enum ldlm_mode mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
#ifdef HAVE_SERVER_SUPPORT
        struct list_head *grant_work = (intention == LDLM_PROCESS_ENQUEUE ?
                                        NULL : work_list);
#endif
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start "
               "%llu end %llu\n", *flags,
               new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks
                 */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list.
                 */
                list_for_each_entry(lock, &res->lr_granted, l_res_link) {
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = lock;
                                break;
                        }
                }
        }
#ifdef HAVE_SERVER_SUPPORT
        else {
                int reprocess_failed = 0;
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request.
                 */
                list_for_each_entry(lock, &res->lr_granted, l_res_link) {
                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = lock;
                                continue;
                        }

                        if (req->l_req_mode == LCK_PR &&
                            lock->l_granted_mode == LCK_PR &&
                            lock->l_policy_data.l_flock.start <=
                                req->l_policy_data.l_flock.start &&
                            lock->l_policy_data.l_flock.end >=
                                req->l_policy_data.l_flock.end) {
                                /* there can't be a granted WR lock */
                                break;
                        }
                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (intention != LDLM_PROCESS_ENQUEUE) {
                                if (ldlm_flock_deadlock(req, lock)) {
                                        ldlm_flock_cancel_on_deadlock(
                                                req, grant_work);
                                        RETURN(LDLM_ITER_CONTINUE);
                                }
                                reprocess_failed = 1;
                                break;
                        }

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        /* add lock to blocking list before deadlock
                         * check to prevent race
                         */
                        ldlm_flock_blocking_link(req, lock);

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_blocking_unlink(req);
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
                if (reprocess_failed)
                        RETURN(LDLM_ITER_CONTINUE);
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection hash list.
         */
        ldlm_flock_blocking_unlink(req);
#endif /* HAVE_SERVER_SUPPORT */
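
        /* Illustrative example of the merge/split pass below: if this
         * process holds a granted PW lock on [0, 49], a new PW request
         * for [50, 100] adjoins it and both merge into one [0, 100]
         * lock, while a later unlock (LCK_NL) of [20, 30] splits the
         * region into [0, 19] and [31, 100].
         */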
        /* Scan the locks owned by this process to find the insertion point
         * (as locks are ordered), and to handle overlaps.
         * We may have to merge or split existing locks.
         */
        if (ownlocks)
                lock = ownlocks;
        else
                lock = list_entry(&res->lr_granted,
                                  struct ldlm_lock, l_res_link);
        list_for_each_entry_safe_from(lock, tmp, &res->lr_granted, l_res_link) {
                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow.
                         */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                res->lr_flock_node.lfn_needs_reprocess = true;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply.
                 */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the lr_lock, allocate the new lock,
                 * and restart processing this lock.
                 */
                if (new2 == NULL) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                                lock->l_granted_mode, &null_cbs,
                                                NULL, 0, LVB_T_NONE);
                        lock_res_and_lock(req);
                        if (IS_ERR(new2)) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = PTR_ERR(new2);
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.owner =
                        new->l_policy_data.l_flock.owner;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export,
                                                               new2);
                        if (new2->l_export->exp_lock_hash &&
                            hlist_unhashed(&new2->l_exp_hash))
                                cfs_hash_add(new2->l_export->exp_lock_hash,
                                             &new2->l_remote_handle,
                                             &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, &lock->l_res_link, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                list_del_init(&req->l_res_link);
                /* insert new lock before "lock", which might be the
                 * next lock for this owner, or might be the first
                 * lock for the next owner, or might not be a lock at
                 * all, but instead points at the head of the list
                 */
                ldlm_resource_add_lock(res, &lock->l_res_link, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
#ifdef HAVE_SERVER_SUPPORT
                if (intention == LDLM_PROCESS_ENQUEUE) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because 'intention' won't be
                         * LDLM_PROCESS_ENQUEUE from ldlm_reprocess_queue.
                         */
                        struct ldlm_flock_node *fn = &res->lr_flock_node;

                        if (mode == LCK_NL && fn->lfn_needs_reprocess &&
                            atomic_read(&fn->lfn_unlock_pending) == 0) {
                                LIST_HEAD(rpc_list);
                                int rc;

restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list,
                                                     LDLM_PROCESS_RESCAN, 0);
                                fn->lfn_needs_reprocess = false;
                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(ns, &rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART) {
                                        fn->lfn_needs_reprocess = true;
                                        GOTO(restart, rc);
                                }
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, grant_work);
                }
#else /* !HAVE_SERVER_SUPPORT */
                /* The only possible case for a client-side call into the
                 * flock policy function is ldlm_flock_completion_ast(),
                 * which carries the LDLM_FL_WAIT_NOREPROC flag.
                 */
                CERROR("Illegal parameter for client-side-only module.\n");
                LBUG();
#endif /* HAVE_SERVER_SUPPORT */
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above so that
         * laawi() can bump the reference count on \a req. Otherwise \a req
         * could be freed before the completion AST can be sent.
         */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags [in]: flags
 * \param data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0 : success
 * \retval <0 : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
        struct file_lock *getlk = lock->l_ast_data;
        struct obd_device *obd;
        enum ldlm_error err;
        int rc = 0;
        ENTRY;

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
        if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_FAIL_LOC;
                unlock_res_and_lock(lock);
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
        }
        CDEBUG(D_DLMTRACE, "flags: %#llx data: %p getlk: %p\n",
               flags, data, getlk);

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (flags & LDLM_FL_FAILED)
                goto granted;

        if (!(flags & LDLM_FL_BLOCKED_MASK)) {
                if (NULL == data)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock got granted, wake it up */
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock,
                   "client-side enqueue returned a blocked lock, sleeping");
        obd = class_exp2obd(lock->l_conn_export);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event_abortable(lock->l_waitq,
                                    is_granted_or_cancelled(lock));
        if (rc < 0) {
                /* take lock off the deadlock detection hash list. */
                lock_res_and_lock(lock);
                ldlm_flock_blocking_unlink(lock);

                /* client side - set flag to prevent lock from being
                 * put on the LRU list
                 */
                ldlm_set_cbpending(lock);
                unlock_res_and_lock(lock);

                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

        if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
                lock_res_and_lock(lock);
                /* DEADLOCK is always set with CBPENDING */
                lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
                unlock_res_and_lock(lock);
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
        }
        if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
                lock_res_and_lock(lock);
                /* DEADLOCK is always set with CBPENDING */
                lock->l_flags |= (LDLM_FL_FAIL_LOC |
                                  LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING);
                unlock_res_and_lock(lock);
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
        }

        lock_res_and_lock(lock);

        /* Protect against race where lock could have been just destroyed
         * due to overlap in ldlm_process_flock_lock().
         */
        if (ldlm_is_destroyed(lock)) {
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");

                /* An error is still to be returned, to propagate it up to
                 * ldlm_cli_enqueue_fini() caller.
                 */
                RETURN(-EIO);
        }

        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        ldlm_resource_unlink_lock(lock);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too.
         */
        /* Do the same for DEADLOCK'ed locks. */
        if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
                enum ldlm_mode mode;

                if (flags & LDLM_FL_TEST_LOCK)
                        LASSERT(ldlm_is_test_lock(lock));

                if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
                        mode = getlk->fl_type;
                else
                        mode = lock->l_req_mode;

                if (ldlm_is_flock_deadlock(lock)) {
                        LDLM_DEBUG(lock, "client-side enqueue deadlock "
                                   "received");
                        rc = -EDEADLK;
                }
                ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
                unlock_res_and_lock(lock);

                /* Need to wake up the waiter if we were evicted */
                wake_up(&lock->l_waitq);

                /* An error is still to be returned, to propagate it up to
                 * ldlm_cli_enqueue_fini() caller.
                 */
                RETURN(rc ? rc : -EIO);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");

        if (flags & LDLM_FL_TEST_LOCK) {
                /*
                 * fcntl(F_GETLK) request
                 * The old mode was saved in getlk->fl_type so that if the
                 * mode in the lock changes we can decref the appropriate
                 * refcount.
                 */
                LASSERT(ldlm_is_test_lock(lock));
                ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        getlk->fl_type = F_RDLCK;
                        break;
                case LCK_PW:
                        getlk->fl_type = F_WRLCK;
                        break;
                default:
                        getlk->fl_type = F_UNLCK;
                }
                getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
                getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
                getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
        } else {
                __u64 noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process.
                 */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(rc);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        /* take lock off the deadlock detection hash list. */
        lock_res_and_lock(lock);
        ldlm_flock_blocking_unlink(lock);
        unlock_res_and_lock(lock);
        RETURN(0);
}

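/*
 * Convert flock policy data between the wire representation and the
 * in-memory one. Note the local-to-wire direction zeroes the whole wire
 * structure first so padding and unused fields never carry stale bytes.
 */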
void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
                                     union ldlm_policy_data *lpolicy)
{
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}

void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
                                     union ldlm_wire_policy_data *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}

/*
 * Export handle<->flock hash operations.
 */
static unsigned
ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
}

static void *
ldlm_export_flock_key(struct hlist_node *hnode)
{
        struct ldlm_lock *lock;

        lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        return &lock->l_policy_data.l_flock.owner;
}

static int
ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
{
        return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
}

static void *
ldlm_export_flock_object(struct hlist_node *hnode)
{
        return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
}

static void
ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;

        lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        LDLM_LOCK_GET(lock);

        flock = &lock->l_policy_data.l_flock;
        LASSERT(flock->blocking_export != NULL);
        class_export_get(flock->blocking_export);
        atomic_inc(&flock->blocking_refs);
}

static void
ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;

        lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);

        flock = &lock->l_policy_data.l_flock;
        LASSERT(flock->blocking_export != NULL);
        class_export_put(flock->blocking_export);
        if (atomic_dec_and_test(&flock->blocking_refs)) {
                flock->blocking_owner = 0;
                flock->blocking_export = NULL;
        }
        LDLM_LOCK_RELEASE(lock);
}

static struct cfs_hash_ops ldlm_export_flock_ops = {
        .hs_hash        = ldlm_export_flock_hash,
        .hs_key         = ldlm_export_flock_key,
        .hs_keycmp      = ldlm_export_flock_keycmp,
        .hs_object      = ldlm_export_flock_object,
        .hs_get         = ldlm_export_flock_get,
        .hs_put         = ldlm_export_flock_put,
        .hs_put_locked  = ldlm_export_flock_put,
};

int ldlm_init_flock_export(struct obd_export *exp)
{
        if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
                RETURN(0);

        exp->exp_flock_hash =
                cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
                                HASH_EXP_LOCK_CUR_BITS,
                                HASH_EXP_LOCK_MAX_BITS,
                                HASH_EXP_LOCK_BKT_BITS, 0,
                                CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                                &ldlm_export_flock_ops,
                                CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
        if (!exp->exp_flock_hash)
                RETURN(-ENOMEM);

        RETURN(0);
}

void ldlm_destroy_flock_export(struct obd_export *exp)
{
        ENTRY;
        if (exp->exp_flock_hash) {
                cfs_hash_putref(exp->exp_flock_hash);
                exp->exp_flock_hash = NULL;
        }
        EXIT;
}