/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

/**
 * This file implements POSIX lock type for Lustre.
 * Its policy properties are start and end of extent and PID.
 *
 * These locks are only done through MDS due to POSIX semantics requiring
 * e.g. that locks could be only partially released and as such split into
 * two parts, and also that two adjacent locks from the same process may be
 * merged into a single wider lock.
 *
 * Lock modes are mapped like this:
 * PR and PW for READ and WRITE locks
 * NL to request a releasing of a portion of the lock
 *
 * These flock locks never timeout.
 */
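
/*
 * Illustrative sketch only (not in the original source): under the mode
 * mapping above, a translation from fcntl() lock types to LDLM modes
 * would look roughly like the hypothetical helper below, assuming the
 * standard F_RDLCK/F_WRLCK/F_UNLCK constants:
 *
 *	static ldlm_mode_t flock_mode_from_fcntl(short fl_type)
 *	{
 *		switch (fl_type) {
 *		case F_RDLCK:
 *			return LCK_PR;
 *		case F_WRLCK:
 *			return LCK_PW;
 *		default:
 *			return LCK_NL;	(F_UNLCK: release a range)
 *		}
 *	}
 */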

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
			    void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 * and safeguard against removal of a list entry.
 * \param pos	the &struct list_head to use as a loop counter. pos MUST
 *		have been initialized prior to using it in this macro.
 * \param n	another &struct list_head to use as temporary storage
 * \param head	the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
	for (n = pos->next; pos != (head); pos = n, n = pos->next)
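
/*
 * Illustrative usage of list_for_remaining_safe() (sketch, not from the
 * original source): "pos" must already point at a valid entry, and "n"
 * caches the next pointer so the current entry may be unlinked safely:
 *
 *	cfs_list_t *pos = head->next;
 *	cfs_list_t *n;
 *
 *	list_for_remaining_safe(pos, n, head) {
 *		struct ldlm_lock *lck =
 *			cfs_list_entry(pos, struct ldlm_lock, l_res_link);
 *		(pos may be removed from the list here)
 *	}
 *
 * ldlm_process_flock_lock() below uses it this way to walk the remainder
 * of lr_granted starting at this owner's first lock.
 */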

static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
	return((new->l_policy_data.l_flock.owner ==
		lock->l_policy_data.l_flock.owner) &&
	       (new->l_export == lock->l_export));
}

static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
	return((new->l_policy_data.l_flock.start <=
		lock->l_policy_data.l_flock.end) &&
	       (new->l_policy_data.l_flock.end >=
		lock->l_policy_data.l_flock.start));
}
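
/*
 * Note: overlap is inclusive on both ends, e.g. extents [0, 9] and
 * [9, 20] overlap, while [0, 9] and [10, 20] do not; they merely adjoin,
 * which matters only for the same-mode merge logic in
 * ldlm_process_flock_lock() below.
 */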

static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
					    struct ldlm_lock *lock)
{
	/* For server only */
	if (req->l_export == NULL)
		return;

	LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));

	req->l_policy_data.l_flock.blocking_owner =
		lock->l_policy_data.l_flock.owner;
	req->l_policy_data.l_flock.blocking_export =
		lock->l_export;
	req->l_policy_data.l_flock.blocking_refs = 0;

	cfs_hash_add(req->l_export->exp_flock_hash,
		     &req->l_policy_data.l_flock.owner,
		     &req->l_exp_flock_hash);
}

static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
{
	/* For server only */
	if (req->l_export == NULL)
		return;

	check_res_locked(req->l_resource);
	if (req->l_export->exp_flock_hash != NULL &&
	    !cfs_hlist_unhashed(&req->l_exp_flock_hash))
		cfs_hash_del(req->l_export->exp_flock_hash,
			     &req->l_policy_data.l_flock.owner,
			     &req->l_exp_flock_hash);
}

static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
{
	ENTRY;

	LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
		   mode, flags);

	/* Safe to not lock here, since it should be empty anyway */
	LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));

	cfs_list_del_init(&lock->l_res_link);
	if (flags == LDLM_FL_WAIT_NOREPROC &&
	    !(lock->l_flags & LDLM_FL_FAILED)) {
		/* client side - set a flag to prevent sending a CANCEL */
		lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

		/* when reaching here, it is under lock_res_and_lock(). Thus,
		 * need call the nolock version of ldlm_lock_decref_internal */
		ldlm_lock_decref_internal_nolock(lock, mode);
	}

	ldlm_lock_destroy_nolock(lock);
	EXIT;
}

/**
 * POSIX locks deadlock detection code.
 *
 * Given a new lock \a req and an existing lock \a bl_lock it conflicts
 * with, we need to iterate through all blocked POSIX locks for this
 * export and see if there is a deadlock condition arising. (i.e. when
 * one client holds a lock on something and wants a lock on something
 * else and at the same time another client has the opposite situation).
 */
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
{
	struct obd_export *req_exp = req->l_export;
	struct obd_export *bl_exp = bl_lock->l_export;
	__u64 req_owner = req->l_policy_data.l_flock.owner;
	__u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;

	/* For server only */
	if (req_exp == NULL)
		return 0;

	class_export_get(bl_exp);
	while (1) {
		struct obd_export *bl_exp_new;
		struct ldlm_lock *lock = NULL;
		struct ldlm_flock *flock;

		if (bl_exp->exp_flock_hash != NULL)
			lock = cfs_hash_lookup(bl_exp->exp_flock_hash,
					       &bl_owner);
		if (lock == NULL)
			break;

		LASSERT(req != lock);
		flock = &lock->l_policy_data.l_flock;
		LASSERT(flock->owner == bl_owner);
		bl_owner = flock->blocking_owner;
		bl_exp_new = class_export_get(flock->blocking_export);
		class_export_put(bl_exp);

		cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
		bl_exp = bl_exp_new;

		if (bl_owner == req_owner && bl_exp == req_exp) {
			class_export_put(bl_exp);
			return 1;
		}
	}
	class_export_put(bl_exp);

	return 0;
}
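
/*
 * Example of a cycle the walk above detects: process P1 holds a lock on
 * extent [0, 9] and blocks waiting for P2's lock on [10, 19], while P2
 * in turn blocks waiting for P1's [0, 9]. Following blocking_owner from
 * P1's blocked request leads back to (req_owner, req_exp), so 1 is
 * returned and the enqueue fails with -EDEADLK.
 */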

static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
					  cfs_list_t *work_list)
{
	CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);

	if ((exp_connect_flags(lock->l_export) &
	     OBD_CONNECT_FLOCK_DEAD) == 0) {
		CERROR("deadlock found, but client doesn't "
		       "support flock cancellation\n");
	} else {
		LASSERT(lock->l_completion_ast);
		LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
		lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
			LDLM_FL_FLOCK_DEADLOCK;
		ldlm_flock_blocking_unlink(lock);
		ldlm_resource_unlink_lock(lock);
		ldlm_add_ast_work_item(lock, NULL, work_list);
	}
}

/**
 * Process a granting attempt for flock lock.
 * Must be called under ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 *
 * It is also responsible for splitting a lock if a portion of the lock
 * is released.
 *
 * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *
 * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent yet, so list of conflicting locks
 *     would be collected and ASTs sent.
 */
int
ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
			ldlm_error_t *err, cfs_list_t *work_list)
{
	struct ldlm_resource *res = req->l_resource;
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
	cfs_list_t *tmp;
	cfs_list_t *ownlocks = NULL;
	struct ldlm_lock *lock = NULL;
	struct ldlm_lock *new = req;
	struct ldlm_lock *new2 = NULL;
	ldlm_mode_t mode = req->l_req_mode;
	int local = ns_is_client(ns);
	int added = (mode == LCK_NL);
	int overlaps = 0;
	int splitted = 0;
	const struct ldlm_callback_suite null_cbs = { NULL };
	ENTRY;

	CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
	       LPU64" end "LPU64"\n", *flags,
	       new->l_policy_data.l_flock.owner,
	       new->l_policy_data.l_flock.pid, mode,
	       req->l_policy_data.l_flock.start,
	       req->l_policy_data.l_flock.end);

	*err = ELDLM_OK;

	if (local) {
		/* No blocking ASTs are sent to the clients for
		 * Posix file & record locks */
		req->l_blocking_ast = NULL;
	} else {
		/* Called on the server for lock cancels. */
		req->l_blocking_ast = ldlm_flock_blocking_ast;
	}

reprocess:
	if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
		/* This loop determines where this process's locks start
		 * in the resource lr_granted list. */
		cfs_list_for_each(tmp, &res->lr_granted) {
			lock = cfs_list_entry(tmp, struct ldlm_lock,
					      l_res_link);
			if (ldlm_same_flock_owner(lock, req)) {
				ownlocks = tmp;
				break;
			}
		}
	} else {
		int reprocess_failed = 0;
		lockmode_verify(mode);

		/* This loop determines if there are existing locks
		 * that conflict with the new lock request. */
		cfs_list_for_each(tmp, &res->lr_granted) {
			lock = cfs_list_entry(tmp, struct ldlm_lock,
					      l_res_link);

			if (ldlm_same_flock_owner(lock, req)) {
				if (!ownlocks)
					ownlocks = tmp;
				continue;
			}

			/* locks are compatible, overlap doesn't matter */
			if (lockmode_compat(lock->l_granted_mode, mode))
				continue;

			if (!ldlm_flocks_overlap(lock, req))
				continue;

			if (!first_enq) {
				reprocess_failed = 1;
				if (ldlm_flock_deadlock(req, lock)) {
					ldlm_flock_cancel_on_deadlock(req,
								      work_list);
					RETURN(LDLM_ITER_CONTINUE);
				}
				continue;
			}

			if (*flags & LDLM_FL_BLOCK_NOWAIT) {
				ldlm_flock_destroy(req, mode, *flags);
				*err = -EAGAIN;
				RETURN(LDLM_ITER_STOP);
			}

			if (*flags & LDLM_FL_TEST_LOCK) {
				ldlm_flock_destroy(req, mode, *flags);
				req->l_req_mode = lock->l_granted_mode;
				req->l_policy_data.l_flock.pid =
					lock->l_policy_data.l_flock.pid;
				req->l_policy_data.l_flock.start =
					lock->l_policy_data.l_flock.start;
				req->l_policy_data.l_flock.end =
					lock->l_policy_data.l_flock.end;
				*flags |= LDLM_FL_LOCK_CHANGED;
				RETURN(LDLM_ITER_STOP);
			}

			/* add lock to blocking list before deadlock
			 * check to prevent race */
			ldlm_flock_blocking_link(req, lock);

			if (ldlm_flock_deadlock(req, lock)) {
				ldlm_flock_blocking_unlink(req);
				ldlm_flock_destroy(req, mode, *flags);
				*err = -EDEADLK;
				RETURN(LDLM_ITER_STOP);
			}

			ldlm_resource_add_lock(res, &res->lr_waiting, req);
			*flags |= LDLM_FL_BLOCK_GRANTED;
			RETURN(LDLM_ITER_STOP);
		}
		if (reprocess_failed)
			RETURN(LDLM_ITER_CONTINUE);
	}

	if (*flags & LDLM_FL_TEST_LOCK) {
		ldlm_flock_destroy(req, mode, *flags);
		req->l_req_mode = LCK_NL;
		*flags |= LDLM_FL_LOCK_CHANGED;
		RETURN(LDLM_ITER_STOP);
	}

	/* In case we had slept on this lock request take it off of the
	 * deadlock detection hash list. */
	ldlm_flock_blocking_unlink(req);

	/* Scan the locks owned by this process that overlap this request.
	 * We may have to merge or split existing locks. */
	if (!ownlocks)
		ownlocks = &res->lr_granted;

	list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
		lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);

		if (!ldlm_same_flock_owner(lock, new))
			break;

		if (lock->l_granted_mode == mode) {
			/* If the modes are the same then we need to process
			 * locks that overlap OR adjoin the new lock. The extra
			 * logic condition is necessary to deal with arithmetic
			 * overflow and underflow. */
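			/* E.g. existing [0, 9] and new [10, 19] adjoin and
			 * merge into [0, 19]. The end != OBD_OBJECT_EOF and
			 * start != 0 checks below keep "end + 1" and
			 * "start - 1" from wrapping around at the extremes
			 * of the __u64 extent range. */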
			if ((new->l_policy_data.l_flock.start >
			     (lock->l_policy_data.l_flock.end + 1))
			    && (lock->l_policy_data.l_flock.end !=
				OBD_OBJECT_EOF))
				continue;

			if ((new->l_policy_data.l_flock.end <
			     (lock->l_policy_data.l_flock.start - 1))
			    && (lock->l_policy_data.l_flock.start != 0))
				break;

			if (new->l_policy_data.l_flock.start <
			    lock->l_policy_data.l_flock.start) {
				lock->l_policy_data.l_flock.start =
					new->l_policy_data.l_flock.start;
			} else {
				new->l_policy_data.l_flock.start =
					lock->l_policy_data.l_flock.start;
			}

			if (new->l_policy_data.l_flock.end >
			    lock->l_policy_data.l_flock.end) {
				lock->l_policy_data.l_flock.end =
					new->l_policy_data.l_flock.end;
			} else {
				new->l_policy_data.l_flock.end =
					lock->l_policy_data.l_flock.end;
			}

			if (added) {
				ldlm_flock_destroy(lock, mode, *flags);
			} else {
				new = lock;
				added = 1;
			}
			continue;
		}

		if (new->l_policy_data.l_flock.start >
		    lock->l_policy_data.l_flock.end)
			continue;

		if (new->l_policy_data.l_flock.end <
		    lock->l_policy_data.l_flock.start)
			break;

		++overlaps;

		if (new->l_policy_data.l_flock.start <=
		    lock->l_policy_data.l_flock.start) {
			if (new->l_policy_data.l_flock.end <
			    lock->l_policy_data.l_flock.end) {
				lock->l_policy_data.l_flock.start =
					new->l_policy_data.l_flock.end + 1;
				break;
			}
			ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
			continue;
		}
		if (new->l_policy_data.l_flock.end >=
		    lock->l_policy_data.l_flock.end) {
			lock->l_policy_data.l_flock.end =
				new->l_policy_data.l_flock.start - 1;
			continue;
		}

		/* split the existing lock into two locks */
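		/* E.g. holding PW [0, 100] and unlocking [30, 40]: new2 is
		 * allocated for the low remainder [0, 29] and the existing
		 * lock is trimmed to the high remainder [41, 100]. */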

		/* if this is an F_UNLCK operation then we could avoid
		 * allocating a new lock and use the req lock passed in
		 * with the request but this would complicate the reply
		 * processing since updates to req get reflected in the
		 * reply. The client side replays the lock request so
		 * it must see the original lock data in the reply. */

		/* XXX - if ldlm_lock_new() can sleep we should
		 * release the lr_lock, allocate the new lock,
		 * and restart processing this lock. */
		if (new2 == NULL) {
			unlock_res_and_lock(req);
			new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
						lock->l_granted_mode, &null_cbs,
						NULL, 0, LVB_T_NONE);
			lock_res_and_lock(req);
			if (IS_ERR(new2)) {
				ldlm_flock_destroy(req, lock->l_granted_mode,
						   *flags);
				*err = PTR_ERR(new2);
				RETURN(LDLM_ITER_STOP);
			}
			goto reprocess;
		}

		splitted = 1;

		new2->l_granted_mode = lock->l_granted_mode;
		new2->l_policy_data.l_flock.pid =
			new->l_policy_data.l_flock.pid;
		new2->l_policy_data.l_flock.owner =
			new->l_policy_data.l_flock.owner;
		new2->l_policy_data.l_flock.start =
			lock->l_policy_data.l_flock.start;
		new2->l_policy_data.l_flock.end =
			new->l_policy_data.l_flock.start - 1;
		lock->l_policy_data.l_flock.start =
			new->l_policy_data.l_flock.end + 1;
		new2->l_conn_export = lock->l_conn_export;
		if (lock->l_export != NULL) {
			new2->l_export = class_export_lock_get(lock->l_export,
							       new2);
			if (new2->l_export->exp_lock_hash &&
			    cfs_hlist_unhashed(&new2->l_exp_hash))
				cfs_hash_add(new2->l_export->exp_lock_hash,
					     &new2->l_remote_handle,
					     &new2->l_exp_hash);
		}
		if (*flags == LDLM_FL_WAIT_NOREPROC)
			ldlm_lock_addref_internal_nolock(new2,
							 lock->l_granted_mode);

		/* insert new2 at lock */
		ldlm_resource_add_lock(res, ownlocks, new2);
		LDLM_LOCK_RELEASE(new2);
		break;
	}

	/* if new2 is created but never used, destroy it */
	if (splitted == 0 && new2 != NULL)
		ldlm_lock_destroy_nolock(new2);

	/* At this point we're granting the lock request. */
	req->l_granted_mode = req->l_req_mode;

	/* Add req to the granted queue before calling ldlm_reprocess_all(). */
	if (!added) {
		cfs_list_del_init(&req->l_res_link);
		/* insert new lock before ownlocks in list. */
		ldlm_resource_add_lock(res, ownlocks, req);
	}

	if (*flags != LDLM_FL_WAIT_NOREPROC) {
#ifdef HAVE_SERVER_SUPPORT
		if (first_enq) {
			/* If this is an unlock, reprocess the waitq and
			 * send completion ASTs for locks that can now be
			 * granted. The only problem with doing this
			 * reprocessing here is that the completion ASTs for
			 * newly granted locks will be sent before the unlock
			 * completion is sent. It shouldn't be an issue. Also
			 * note that ldlm_process_flock_lock() will recurse,
			 * but only once because first_enq will be false from
			 * ldlm_reprocess_queue. */
			if ((mode == LCK_NL) && overlaps) {
				CFS_LIST_HEAD(rpc_list);
				int rc;
restart:
				ldlm_reprocess_queue(res, &res->lr_waiting,
						     &rpc_list);

				unlock_res_and_lock(req);
				rc = ldlm_run_ast_work(ns, &rpc_list,
						       LDLM_WORK_CP_AST);
				lock_res_and_lock(req);
				if (rc == -ERESTART)
					GOTO(restart, -ERESTART);
			}
		} else {
			LASSERT(req->l_completion_ast);
			ldlm_add_ast_work_item(req, NULL, work_list);
		}
#else /* !HAVE_SERVER_SUPPORT */
		/* The only possible case for a client-side call into the
		 * flock policy function is ldlm_flock_completion_ast(),
		 * which carries the LDLM_FL_WAIT_NOREPROC flag. */
		CERROR("Illegal parameter for client-side-only module.\n");
		LBUG();
#endif /* HAVE_SERVER_SUPPORT */
	}

	/* In case we're reprocessing the requested lock we can't destroy
	 * it until after calling ldlm_add_ast_work_item() above so that laawi()
	 * can bump the reference count on \a req. Otherwise \a req
	 * could be freed before the completion AST can be sent. */
	if (added)
		ldlm_flock_destroy(req, mode, *flags);

	ldlm_resource_dump(D_INFO, res);
	RETURN(LDLM_ITER_CONTINUE);
}

struct ldlm_flock_wait_data {
	struct ldlm_lock *fwd_lock;
	int		  fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
	struct ldlm_lock *lock;
	ENTRY;

	lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

	/* take lock off the deadlock detection hash list. */
	lock_res_and_lock(lock);
	ldlm_flock_blocking_unlink(lock);

	/* client side - set flag to prevent lock from being put on LRU list */
	lock->l_flags |= LDLM_FL_CBPENDING;
	unlock_res_and_lock(lock);

	EXIT;
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags [in]: flags
 * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
	struct file_lock		*getlk = lock->l_ast_data;
	struct obd_device		*obd;
	struct obd_import		*imp = NULL;
	struct ldlm_flock_wait_data	 fwd;
	struct l_wait_info		 lwi;
	ldlm_error_t			 err;
	int				 rc = 0;
	ENTRY;

	CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
	       flags, data, getlk);

	/* Import invalidation. We need to actually release the lock
	 * references being held, so that it can go away. No point in
	 * holding the lock even if app still believes it has it, since
	 * server already dropped it anyway. Only for granted locks too. */
	if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
	    (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
		if (lock->l_req_mode == lock->l_granted_mode &&
		    lock->l_granted_mode != LCK_NL &&
		    NULL == data)
			ldlm_lock_decref_internal(lock, lock->l_req_mode);

		/* Need to wake up the waiter if we were evicted */
		cfs_waitq_signal(&lock->l_waitq);
		RETURN(0);
	}

	LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
		       LDLM_FL_BLOCK_CONV))) {
		if (NULL == data)
			/* mds granted the lock in the reply */
			goto granted;
		/* CP AST RPC: lock get granted, wake it up */
		cfs_waitq_signal(&lock->l_waitq);
		RETURN(0);
	}

	LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
		   "sleeping");
	fwd.fwd_lock = lock;
	obd = class_exp2obd(lock->l_conn_export);

	/* if this is a local lock, there is no import */
	if (NULL != obd)
		imp = obd->u.cli.cl_import;

	if (NULL != imp) {
		spin_lock(&imp->imp_lock);
		fwd.fwd_generation = imp->imp_generation;
		spin_unlock(&imp->imp_lock);
	}

	lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

	/* Go to sleep until the lock is granted. */
	rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

	if (rc) {
		LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
			   rc);
		RETURN(rc);
	}

granted:
	OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

	if (lock->l_flags & LDLM_FL_DESTROYED) {
		LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
		RETURN(0);
	}

	if (lock->l_flags & LDLM_FL_FAILED) {
		LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
		RETURN(-EIO);
	}

	if (rc) {
		LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
			   rc);
		RETURN(rc);
	}

	LDLM_DEBUG(lock, "client-side enqueue granted");

	lock_res_and_lock(lock);

	/* take lock off the deadlock detection hash list. */
	ldlm_flock_blocking_unlink(lock);

	/* ldlm_lock_enqueue() has already placed lock on the granted list. */
	cfs_list_del_init(&lock->l_res_link);

	if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
		LDLM_DEBUG(lock, "client-side enqueue deadlock received");
		rc = -EDEADLK;
	} else if (flags & LDLM_FL_TEST_LOCK) {
		/* fcntl(F_GETLK) request */
		/* The old mode was saved in getlk->fl_type so that if the mode
		 * in the lock changes we can decref the appropriate refcount. */
		ldlm_flock_destroy(lock, flock_type(getlk),
				   LDLM_FL_WAIT_NOREPROC);
		switch (lock->l_granted_mode) {
		case LCK_PR:
			flock_set_type(getlk, F_RDLCK);
			break;
		case LCK_PW:
			flock_set_type(getlk, F_WRLCK);
			break;
		default:
			flock_set_type(getlk, F_UNLCK);
		}
		flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
		flock_set_start(getlk,
				(loff_t)lock->l_policy_data.l_flock.start);
		flock_set_end(getlk,
			      (loff_t)lock->l_policy_data.l_flock.end);
	} else {
		__u64 noreproc = LDLM_FL_WAIT_NOREPROC;

		/* We need to reprocess the lock to do merges or splits
		 * with existing locks owned by this process. */
		ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
	}
	unlock_res_and_lock(lock);
	RETURN(rc);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
			    void *data, int flag)
{
	ENTRY;

	LASSERT(lock);
	LASSERT(flag == LDLM_CB_CANCELING);

	/* take lock off the deadlock detection hash list. */
	lock_res_and_lock(lock);
	ldlm_flock_blocking_unlink(lock);
	unlock_res_and_lock(lock);
	RETURN(0);
}

void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
				       ldlm_policy_data_t *lpolicy)
{
	memset(lpolicy, 0, sizeof(*lpolicy));
	lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
	lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
	lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
	/* Compat code, old clients had no idea about owner field and
	 * relied solely on pid for ownership. Introduced in LU-104, 2.1,
	 * April 2011 */
	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}

void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
				       ldlm_policy_data_t *lpolicy)
{
	memset(lpolicy, 0, sizeof(*lpolicy));
	lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
	lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
	lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}

void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
				     ldlm_wire_policy_data_t *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
	wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
	wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
	wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}

/*
 * Export handle<->flock hash operations.
 */
static unsigned
ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
{
	return cfs_hash_u64_hash(*(__u64 *)key, mask);
}

static void *
ldlm_export_flock_key(cfs_hlist_node_t *hnode)
{
	struct ldlm_lock *lock;

	lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
	return &lock->l_policy_data.l_flock.owner;
}

static int
ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
	return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
}

static void *
ldlm_export_flock_object(cfs_hlist_node_t *hnode)
{
	return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
}

static void
ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
	struct ldlm_lock *lock;
	struct ldlm_flock *flock;

	lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
	LDLM_LOCK_GET(lock);

	flock = &lock->l_policy_data.l_flock;
	LASSERT(flock->blocking_export != NULL);
	class_export_get(flock->blocking_export);
	flock->blocking_refs++;
}

static void
ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
	struct ldlm_lock *lock;
	struct ldlm_flock *flock;

	lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
	LDLM_LOCK_RELEASE(lock);

	flock = &lock->l_policy_data.l_flock;
	LASSERT(flock->blocking_export != NULL);
	class_export_put(flock->blocking_export);
	if (--flock->blocking_refs == 0) {
		flock->blocking_owner = 0;
		flock->blocking_export = NULL;
	}
}
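
/*
 * The hs_get()/hs_put() callbacks above pin both the hashed lock and its
 * blocking_export for as long as the lock stays in the per-export flock
 * hash, so ldlm_flock_deadlock() can safely follow the blocking-owner
 * chain across exports.
 */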

static cfs_hash_ops_t ldlm_export_flock_ops = {
	.hs_hash	= ldlm_export_flock_hash,
	.hs_key		= ldlm_export_flock_key,
	.hs_keycmp	= ldlm_export_flock_keycmp,
	.hs_object	= ldlm_export_flock_object,
	.hs_get		= ldlm_export_flock_get,
	.hs_put		= ldlm_export_flock_put,
	.hs_put_locked	= ldlm_export_flock_put,
};

int ldlm_init_flock_export(struct obd_export *exp)
{
	if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
		RETURN(0);

	exp->exp_flock_hash =
		cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
				HASH_EXP_LOCK_CUR_BITS,
				HASH_EXP_LOCK_MAX_BITS,
				HASH_EXP_LOCK_BKT_BITS, 0,
				CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
				&ldlm_export_flock_ops,
				CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
	if (!exp->exp_flock_hash)
		RETURN(-ENOMEM);

	RETURN(0);
}
EXPORT_SYMBOL(ldlm_init_flock_export);

void ldlm_destroy_flock_export(struct obd_export *exp)
{
	ENTRY;
	if (exp->exp_flock_hash) {
		cfs_hash_putref(exp->exp_flock_hash);
		exp->exp_flock_hash = NULL;
	}
	EXIT;
}
EXPORT_SYMBOL(ldlm_destroy_flock_export);