4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Implementation of cl_lock for OSC layer.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_OSC
44 # include <libcfs/libcfs.h>
46 # include <liblustre.h>
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
51 #include "osc_cl_internal.h"
57 #define _PAGEREF_MAGIC (-10000000)
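/*
 * Note: osc_dlm_lock_pageref() at the end of this file adds _PAGEREF_MAGIC
 * to ols_pageref to probe whether any pages still reference the lock; when
 * the counter was zero it is left at _PAGEREF_MAGIC, which is why
 * osc_lock_fini() accepts either 0 or _PAGEREF_MAGIC in ols_pageref.
 */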
59 /*****************************************************************************
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68 struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
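/*
 * A rough sketch of the ols_state progression as driven by the code below
 * (a reading aid, not an exhaustive list of transitions):
 *
 *   OLS_NEW -> OLS_ENQUEUED -> OLS_UPCALL_RECEIVED -> OLS_GRANTED
 *           -> OLS_RELEASED (unuse) or OLS_BLOCKED (blocking ast)
 *           -> OLS_CANCELLED
 */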
71 int osc_lock_is_lockless(const struct osc_lock *olck)
73 return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
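/*
 * In other words, a lock is "lockless" exactly when osc_lock_to_lockless()
 * has switched its slice operations to osc_lock_lockless_ops.
 */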
77 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
78 * pointer cannot be dereferenced, as lock is not protected from concurrent
79 * reclaim. This function is a helper for osc_lock_invariant().
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
83 struct ldlm_lock *lock;
85 lock = ldlm_handle2lock(handle);
92 * Invariant that has to be true all of the time.
94 static int osc_lock_invariant(struct osc_lock *ols)
96 struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
97 struct ldlm_lock *olock = ols->ols_lock;
98 int handle_used = lustre_handle_is_used(&ols->ols_handle);
101 ergo(osc_lock_is_lockless(ols),
102 ols->ols_locklessable && ols->ols_lock == NULL) ||
103 (ergo(olock != NULL, handle_used) &&
105 olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
107 * Check that ->ols_handle and ->ols_lock are consistent, but
108 * take into account that they are set at different times.
111 ergo(lock != NULL && olock != NULL, lock == olock) &&
112 ergo(lock == NULL, olock == NULL)) &&
113 ergo(ols->ols_state == OLS_CANCELLED,
114 olock == NULL && !handle_used) &&
116 * DLM lock is destroyed only after we have seen cancellation
119 ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
120 !olock->l_destroyed) &&
121 ergo(ols->ols_state == OLS_GRANTED,
123 olock->l_req_mode == olock->l_granted_mode &&
127 /*****************************************************************************
134 * Breaks a link between osc_lock and dlm_lock.
136 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
138 struct ldlm_lock *dlmlock;
140 spin_lock(&osc_ast_guard);
141 dlmlock = olck->ols_lock;
142 if (dlmlock == NULL) {
143 spin_unlock(&osc_ast_guard);
147 olck->ols_lock = NULL;
148 /* wb(); --- for all who check (ols->ols_lock != NULL) before
149 * calling osc_lock_detach() */
150 dlmlock->l_ast_data = NULL;
151 olck->ols_handle.cookie = 0ULL;
152 spin_unlock(&osc_ast_guard);
154 lock_res_and_lock(dlmlock);
155 if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156 struct cl_object *obj = olck->ols_cl.cls_obj;
157 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
160 cl_object_attr_lock(obj);
161 /* Must get the value under the lock to avoid possible races. */
162 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
163 /* Update the kms. Need to loop over all granted locks.
164 * Not a problem for the client */
165 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
167 cl_object_attr_set(env, obj, attr, CAT_KMS);
168 cl_object_attr_unlock(obj);
170 unlock_res_and_lock(dlmlock);
172 /* release a reference taken in osc_lock_upcall0(). */
173 LASSERT(olck->ols_has_ref);
174 lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
175 LDLM_LOCK_RELEASE(dlmlock);
176 olck->ols_has_ref = 0;
179 static int osc_lock_unhold(struct osc_lock *ols)
185 result = osc_cancel_base(&ols->ols_handle,
186 ols->ols_einfo.ei_mode);
191 static int osc_lock_unuse(const struct lu_env *env,
192 const struct cl_lock_slice *slice)
194 struct osc_lock *ols = cl2osc_lock(slice);
196 LINVRNT(osc_lock_invariant(ols));
198 switch (ols->ols_state) {
200 LASSERT(!ols->ols_hold);
201 LASSERT(ols->ols_agl);
203 case OLS_UPCALL_RECEIVED:
204 osc_lock_unhold(ols);
206 LASSERT(!ols->ols_hold);
207 osc_lock_detach(env, ols);
208 ols->ols_state = OLS_NEW;
211 LASSERT(!ols->ols_glimpse);
212 LASSERT(ols->ols_hold);
214 * Move lock into OLS_RELEASED state before calling
215 * osc_cancel_base() so that possible synchronous cancellation
216 * (that always happens e.g., for liblustre) sees that lock is
219 ols->ols_state = OLS_RELEASED;
220 return osc_lock_unhold(ols);
222 CERROR("Impossible state: %d\n", ols->ols_state);
227 static void osc_lock_fini(const struct lu_env *env,
228 struct cl_lock_slice *slice)
230 struct osc_lock *ols = cl2osc_lock(slice);
232 LINVRNT(osc_lock_invariant(ols));
234 * ->ols_hold can still be true at this point if, for example, a
235 * thread that requested a lock was killed (and released a reference
236 * to the lock), before reply from a server was received. In this case
237 * lock is destroyed immediately after upcall.
239 osc_lock_unhold(ols);
240 LASSERT(ols->ols_lock == NULL);
241 LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
242 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
244 OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
247 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
248 struct ldlm_res_id *resname)
250 const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
253 * In the perfect world of the future, where ost servers talk
256 fid_build_reg_res_name(fid, resname);
259 * In reality, where the ost server expects ->lsm_object_id and
260 * ->lsm_object_seq in resname.
262 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
267 static void osc_lock_build_policy(const struct lu_env *env,
268 const struct cl_lock *lock,
269 ldlm_policy_data_t *policy)
271 const struct cl_lock_descr *d = &lock->cll_descr;
273 osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
274 policy->l_extent.gid = d->cld_gid;
277 static __u64 osc_enq2ldlm_flags(__u32 enqflags)
281 LASSERT((enqflags & ~CEF_MASK) == 0);
283 if (enqflags & CEF_NONBLOCK)
284 result |= LDLM_FL_BLOCK_NOWAIT;
285 if (enqflags & CEF_ASYNC)
286 result |= LDLM_FL_HAS_INTENT;
287 if (enqflags & CEF_DISCARD_DATA)
288 result |= LDLM_AST_DISCARD_DATA;
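/* For example, following the mapping above, an enqueue with
 * CEF_NONBLOCK | CEF_DISCARD_DATA translates into
 * LDLM_FL_BLOCK_NOWAIT | LDLM_AST_DISCARD_DATA on the DLM side. */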
293 * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
294 * pointers. Initialized in osc_init().
296 spinlock_t osc_ast_guard;
298 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
300 struct osc_lock *olck;
302 lock_res_and_lock(dlm_lock);
303 spin_lock(&osc_ast_guard);
304 olck = dlm_lock->l_ast_data;
306 struct cl_lock *lock = olck->ols_cl.cls_lock;
308 * If osc_lock holds a reference on ldlm lock, return it even
309 * when cl_lock is in CLS_FREEING state. This way
311 * osc_ast_data_get(dlmlock) == NULL
313 * guarantees that all osc references on dlmlock were
314 * released. osc_dlm_blocking_ast0() relies on that.
316 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
317 cl_lock_get_trust(lock);
318 lu_ref_add_atomic(&lock->cll_reference,
319 "ast", cfs_current());
323 spin_unlock(&osc_ast_guard);
324 unlock_res_and_lock(dlm_lock);
328 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
330 struct cl_lock *lock;
332 lock = olck->ols_cl.cls_lock;
333 lu_ref_del(&lock->cll_reference, "ast", cfs_current());
334 cl_lock_put(env, lock);
338 * Updates object attributes from a lock value block (lvb) received together
339 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
342 * This can be optimized to not update attributes when lock is a result of a
345 * Called under lock and resource spin-locks.
347 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
351 struct cl_object *obj;
352 struct lov_oinfo *oinfo;
353 struct cl_attr *attr;
358 if (!(olck->ols_flags & LDLM_FL_LVB_READY))
361 lvb = &olck->ols_lvb;
362 obj = olck->ols_cl.cls_obj;
363 oinfo = cl2osc(obj)->oo_oinfo;
364 attr = &osc_env_info(env)->oti_attr;
365 valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
366 cl_lvb2attr(attr, lvb);
368 cl_object_attr_lock(obj);
370 struct ldlm_lock *dlmlock;
373 dlmlock = olck->ols_lock;
374 LASSERT(dlmlock != NULL);
376 /* re-grab LVB from a dlm lock under DLM spin-locks. */
377 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
378 size = lvb->lvb_size;
379 /* Extend KMS up to the end of this lock and no further
380 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
381 if (size > dlmlock->l_policy_data.l_extent.end)
382 size = dlmlock->l_policy_data.l_extent.end + 1;
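/* e.g., a lock on byte range [0, 4095] yields a KMS of at most 4096,
 * i.e., exactly one 4KiB page. */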
383 if (size >= oinfo->loi_kms) {
384 LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
385 ", kms="LPU64, lvb->lvb_size, size);
387 attr->cat_kms = size;
389 LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
390 LPU64"; leaving kms="LPU64", end="LPU64,
391 lvb->lvb_size, oinfo->loi_kms,
392 dlmlock->l_policy_data.l_extent.end);
394 ldlm_lock_allow_match_locked(dlmlock);
395 } else if (rc == -ENAVAIL && olck->ols_glimpse) {
396 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
397 " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
402 cl_object_attr_set(env, obj, attr, valid);
404 cl_object_attr_unlock(obj);
410 * Called when a lock is granted, from an upcall (when server returned a
411 * granted lock), or from completion AST, when server returned a blocked lock.
413 * Called under lock and resource spin-locks, that are released temporarily
416 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
417 struct ldlm_lock *dlmlock, int rc)
419 struct ldlm_extent *ext;
420 struct cl_lock *lock;
421 struct cl_lock_descr *descr;
423 LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
426 if (olck->ols_state < OLS_GRANTED) {
427 lock = olck->ols_cl.cls_lock;
428 ext = &dlmlock->l_policy_data.l_extent;
429 descr = &osc_env_info(env)->oti_descr;
430 descr->cld_obj = lock->cll_descr.cld_obj;
432 /* XXX check that ->l_granted_mode is valid. */
433 descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
434 descr->cld_start = cl_index(descr->cld_obj, ext->start);
435 descr->cld_end = cl_index(descr->cld_obj, ext->end);
436 descr->cld_gid = ext->gid;
438 * tell upper layers the extent of the lock that was actually
441 olck->ols_state = OLS_GRANTED;
442 osc_lock_lvb_update(env, olck, rc);
444 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
445 * to take a semaphore on a parent lock. This is safe, because
446 * spin-locks are needed to protect consistency of
447 * dlmlock->l_*_mode and LVB, and we have finished processing
449 unlock_res_and_lock(dlmlock);
450 cl_lock_modify(env, lock, descr);
451 cl_lock_signal(env, lock);
452 LINVRNT(osc_lock_invariant(olck));
453 lock_res_and_lock(dlmlock);
458 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
461 struct ldlm_lock *dlmlock;
465 dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
466 LASSERT(dlmlock != NULL);
468 lock_res_and_lock(dlmlock);
469 spin_lock(&osc_ast_guard);
470 LASSERT(dlmlock->l_ast_data == olck);
471 LASSERT(olck->ols_lock == NULL);
472 olck->ols_lock = dlmlock;
473 spin_unlock(&osc_ast_guard);
476 * Lock might not be granted yet. In this case, the completion ast
477 * (osc_ldlm_completion_ast()) comes later and finishes lock
480 if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
481 osc_lock_granted(env, olck, dlmlock, 0);
482 unlock_res_and_lock(dlmlock);
485 * osc_enqueue_interpret() decrefs asynchronous locks, counter
488 ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
491 /* lock reference taken by ldlm_handle2lock_long() is owned by
492 * osc_lock and released in osc_lock_detach() */
493 lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
494 olck->ols_has_ref = 1;
498 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
499 * received from a server, or after osc_enqueue_base() matched a local DLM
502 static int osc_lock_upcall(void *cookie, int errcode)
504 struct osc_lock *olck = cookie;
505 struct cl_lock_slice *slice = &olck->ols_cl;
506 struct cl_lock *lock = slice->cls_lock;
508 struct cl_env_nest nest;
511 env = cl_env_nested_get(&nest);
515 cl_lock_mutex_get(env, lock);
517 LASSERT(lock->cll_state >= CLS_QUEUING);
518 if (olck->ols_state == OLS_ENQUEUED) {
519 olck->ols_state = OLS_UPCALL_RECEIVED;
520 rc = ldlm_error2errno(errcode);
521 } else if (olck->ols_state == OLS_CANCELLED) {
524 CERROR("Impossible state: %d\n", olck->ols_state);
528 struct ldlm_lock *dlmlock;
530 dlmlock = ldlm_handle2lock(&olck->ols_handle);
531 if (dlmlock != NULL) {
532 lock_res_and_lock(dlmlock);
533 spin_lock(&osc_ast_guard);
534 LASSERT(olck->ols_lock == NULL);
535 dlmlock->l_ast_data = NULL;
536 olck->ols_handle.cookie = 0ULL;
537 spin_unlock(&osc_ast_guard);
538 ldlm_lock_fail_match_locked(dlmlock);
539 unlock_res_and_lock(dlmlock);
540 LDLM_LOCK_PUT(dlmlock);
543 if (olck->ols_glimpse)
544 olck->ols_glimpse = 0;
545 osc_lock_upcall0(env, olck);
548 /* Error handling, some errors are tolerable. */
549 if (olck->ols_locklessable && rc == -EUSERS) {
550 /* This is a tolerable error, turn this lock into
553 osc_object_set_contended(cl2osc(slice->cls_obj));
554 LASSERT(slice->cls_ops == &osc_lock_ops);
556 /* Change this lock to ldlmlock-less lock. */
557 osc_lock_to_lockless(env, olck, 1);
558 olck->ols_state = OLS_GRANTED;
560 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
561 osc_lock_lvb_update(env, olck, rc);
562 cl_lock_delete(env, lock);
563 /* Hide the error. */
568 /* For the AGL case, the RPC sponsor may exit cl_lock
569 * processing without wait() being called before the related OSC
570 * lock upcall(). So update the lock status according
571 * to the enqueue result inside the AGL upcall().
573 lock->cll_flags |= CLF_FROM_UPCALL;
574 cl_wait_try(env, lock);
575 lock->cll_flags &= ~CLF_FROM_UPCALL;
576 if (!olck->ols_glimpse)
579 cl_lock_signal(env, lock);
580 /* del user for lock upcall cookie */
581 cl_unuse_try(env, lock);
583 /* del user for lock upcall cookie */
584 cl_lock_user_del(env, lock);
585 cl_lock_error(env, lock, rc);
588 /* release cookie reference, acquired by osc_lock_enqueue() */
589 cl_lock_hold_release(env, lock, "upcall", lock);
590 cl_lock_mutex_put(env, lock);
592 lu_ref_del(&lock->cll_reference, "upcall", lock);
593 /* This may be the last reference, so it must be called after
594 * cl_lock_mutex_put(). */
595 cl_lock_put(env, lock);
597 cl_env_nested_put(&nest, env);
599 /* should never happen, similar to osc_ldlm_blocking_ast(). */
606 * Core of osc_dlm_blocking_ast() logic.
608 static void osc_lock_blocking(const struct lu_env *env,
609 struct ldlm_lock *dlmlock,
610 struct osc_lock *olck, int blocking)
612 struct cl_lock *lock = olck->ols_cl.cls_lock;
614 LASSERT(olck->ols_lock == dlmlock);
615 CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
616 LASSERT(!osc_lock_is_lockless(olck));
619 * Lock might still be addref-ed here if, e.g., a blocking ast
620 * is sent for a failed lock.
622 osc_lock_unhold(olck);
624 if (blocking && olck->ols_state < OLS_BLOCKED)
626 * Move osc_lock into OLS_BLOCKED before canceling the lock,
627 * because it recursively re-enters osc_lock_blocking(), with
628 * the state set to OLS_CANCELLED.
630 olck->ols_state = OLS_BLOCKED;
632 * cancel and destroy lock at least once no matter how blocking ast is
633 * entered (see comment above osc_ldlm_blocking_ast() for use
634 * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
636 cl_lock_cancel(env, lock);
637 cl_lock_delete(env, lock);
641 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
642 * and ldlm_lock caches.
644 static int osc_dlm_blocking_ast0(const struct lu_env *env,
645 struct ldlm_lock *dlmlock,
646 void *data, int flag)
648 struct osc_lock *olck;
649 struct cl_lock *lock;
653 LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
656 olck = osc_ast_data_get(dlmlock);
658 lock = olck->ols_cl.cls_lock;
659 cl_lock_mutex_get(env, lock);
660 LINVRNT(osc_lock_invariant(olck));
661 if (olck->ols_ast_wait) {
662 /* wake up osc_lock_use() */
663 cl_lock_signal(env, lock);
664 olck->ols_ast_wait = 0;
667 * Lock might have been canceled while this thread was
668 * sleeping on the lock mutex, but olck is pinned in memory.
670 if (olck == dlmlock->l_ast_data) {
672 * NOTE: DLM sends blocking AST's for failed locks
673 * (that are still in pre-OLS_GRANTED state)
674 * too, and they have to be canceled otherwise
675 * DLM lock is never destroyed and stuck in
678 * Alternatively, ldlm_cli_cancel() can be
679 * called here directly for osc_locks with
680 * ols_state < OLS_GRANTED to maintain an
681 * invariant that ->clo_cancel() is only called
682 * for locks that were granted.
684 LASSERT(data == olck);
685 osc_lock_blocking(env, dlmlock,
686 olck, flag == LDLM_CB_BLOCKING);
689 cl_lock_mutex_put(env, lock);
690 osc_ast_data_put(env, olck);
693 * DLM lock exists, but there is no cl_lock attached to it.
694 * This is a `normal' race. cl_object and its cl_locks can be
695 * removed by memory pressure, together with all pages.
697 cancel = (flag == LDLM_CB_BLOCKING);
700 struct lustre_handle *lockh;
702 lockh = &osc_env_info(env)->oti_handle;
703 ldlm_lock2handle(dlmlock, lockh);
704 result = ldlm_cli_cancel(lockh);
711 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
712 * some other lock, or is canceled. This function is installed as a
713 * ldlm_lock::l_blocking_ast() for client extent locks.
715 * Control flow is tricky, because ldlm uses the same call-back
716 * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
718 * \param dlmlock lock for which ast occurred.
720 * \param new description of a conflicting lock in case of blocking ast.
722 * \param data value of dlmlock->l_ast_data
724 * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
725 * cancellation and blocking ast's.
727 * Possible use cases:
729 * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
730 * lock due to lock lru pressure, or explicit user request to purge
733 * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
734 * us that dlmlock conflicts with another lock that some client is
735 * enqueueing. Lock is canceled.
737 * - cl_lock_cancel() is called. osc_lock_cancel() calls
738 * ldlm_cli_cancel() that calls
740 * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
742 * recursively entering osc_ldlm_blocking_ast().
744 * - client cancels lock voluntarily (e.g., as a part of early cancellation):
747 * osc_lock_cancel()->
748 * ldlm_cli_cancel()->
749 * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
752 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
753 struct ldlm_lock_desc *new, void *data,
757 struct cl_env_nest nest;
761 * This can be called in the context of outer IO, e.g.,
764 * ->osc_enqueue_base()->...
765 * ->ldlm_prep_elc_req()->...
766 * ->ldlm_cancel_callback()->...
767 * ->osc_ldlm_blocking_ast()
769 * a new environment has to be created so as not to corrupt the outer context.
771 env = cl_env_nested_get(&nest);
773 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
774 cl_env_nested_put(&nest, env);
776 result = PTR_ERR(env);
778 * XXX This should never happen, as cl_lock is
779 * stuck. Pre-allocated environment a la vvp_inode_fini_env
785 if (result == -ENODATA)
788 CERROR("BAST failed: %d\n", result);
793 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
794 __u64 flags, void *data)
796 struct cl_env_nest nest;
798 struct osc_lock *olck;
799 struct cl_lock *lock;
803 /* first, do dlm part of the work */
804 dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
805 /* then, notify cl_lock */
806 env = cl_env_nested_get(&nest);
808 olck = osc_ast_data_get(dlmlock);
810 lock = olck->ols_cl.cls_lock;
811 cl_lock_mutex_get(env, lock);
813 * ldlm_handle_cp_callback() copied LVB from request
814 * to lock->l_lvb_data, store it in osc_lock.
816 LASSERT(dlmlock->l_lvb_data != NULL);
817 lock_res_and_lock(dlmlock);
818 olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
819 if (olck->ols_lock == NULL) {
821 * upcall (osc_lock_upcall()) hasn't yet been
822 * called. Do nothing now, upcall will bind
823 * olck to dlmlock and signal the waiters.
825 * This maintains an invariant that osc_lock
826 * and ldlm_lock are always bound when
827 * osc_lock is in OLS_GRANTED state.
829 } else if (dlmlock->l_granted_mode ==
830 dlmlock->l_req_mode) {
831 osc_lock_granted(env, olck, dlmlock, dlmrc);
833 unlock_res_and_lock(dlmlock);
836 CL_LOCK_DEBUG(D_ERROR, env, lock,
837 "dlmlock returned %d\n", dlmrc);
838 cl_lock_error(env, lock, dlmrc);
840 cl_lock_mutex_put(env, lock);
841 osc_ast_data_put(env, olck);
844 result = -ELDLM_NO_LOCK_DATA;
845 cl_env_nested_put(&nest, env);
847 result = PTR_ERR(env);
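/* a DLM-level error, if any, takes precedence over the cl_lock
 * notification result below */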
848 return dlmrc ?: result;
851 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
853 struct ptlrpc_request *req = data;
854 struct osc_lock *olck;
855 struct cl_lock *lock;
856 struct cl_object *obj;
857 struct cl_env_nest nest;
860 struct req_capsule *cap;
863 LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
865 env = cl_env_nested_get(&nest);
867 /* osc_ast_data_get() has to go after environment is
868 * allocated, because osc_ast_data() acquires a
869 * reference to a lock, and it can only be released in
872 olck = osc_ast_data_get(dlmlock);
874 lock = olck->ols_cl.cls_lock;
875 /* Do not grab the mutex of cl_lock for glimpse.
876 * See LU-1274 for details.
877 * BTW, it's okay for cl_lock to be cancelled during
878 * this period because server can handle this race.
879 * See ldlm_server_glimpse_ast() for details.
880 * cl_lock_mutex_get(env, lock); */
882 req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
883 req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
885 result = req_capsule_server_pack(cap);
887 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
888 obj = lock->cll_descr.cld_obj;
889 result = cl_object_glimpse(env, obj, lvb);
891 if (!exp_connect_lvb_type(req->rq_export))
892 req_capsule_shrink(&req->rq_pill,
894 sizeof(struct ost_lvb_v1),
896 osc_ast_data_put(env, olck);
899 * These errors are normal races, so we don't want to
900 * fill the console with messages by calling
903 lustre_pack_reply(req, 1, NULL, NULL);
904 result = -ELDLM_NO_LOCK_DATA;
906 cl_env_nested_put(&nest, env);
908 result = PTR_ERR(env);
909 req->rq_status = result;
913 static unsigned long osc_lock_weigh(const struct lu_env *env,
914 const struct cl_lock_slice *slice)
917 * don't need to grab coh_page_guard since we don't care about the exact #
920 return cl_object_header(slice->cls_obj)->coh_pages;
924 * Get the weight of dlm lock for early cancellation.
926 * XXX: it should return the pages covered by this \a dlmlock.
928 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
930 struct cl_env_nest nest;
932 struct osc_lock *lock;
934 unsigned long weight;
939 * osc_ldlm_weigh_ast has a complex context since it might be called
940 * because of lock canceling, or from user input. We have to make
941 * a new environment for it. It is probably safe to use
942 * the upper context because cl_lock_put() doesn't modify environment
943 * variables. But just in case ..
945 env = cl_env_nested_get(&nest);
947 /* Mostly due to lack of memory; tend to eliminate this lock */
950 LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
951 lock = osc_ast_data_get(dlmlock);
953 /* cl_lock was destroyed because of memory pressure.
954 * It is more reasonable to assign this type of lock
957 GOTO(out, weight = 0);
960 cll = lock->ols_cl.cls_lock;
961 cl_lock_mutex_get(env, cll);
962 weight = cl_lock_weigh(env, cll);
963 cl_lock_mutex_put(env, cll);
964 osc_ast_data_put(env, lock);
968 cl_env_nested_put(&nest, env);
972 static void osc_lock_build_einfo(const struct lu_env *env,
973 const struct cl_lock *clock,
974 struct osc_lock *lock,
975 struct ldlm_enqueue_info *einfo)
977 enum cl_lock_mode mode;
979 mode = clock->cll_descr.cld_mode;
980 if (mode == CLM_PHANTOM)
982 * For now, enqueue all glimpse locks in read mode. In the
983 * future, client might choose to enqueue LCK_PW lock for
984 * glimpse on a file opened for write.
988 einfo->ei_type = LDLM_EXTENT;
989 einfo->ei_mode = osc_cl_lock2ldlm(mode);
990 einfo->ei_cb_bl = osc_ldlm_blocking_ast;
991 einfo->ei_cb_cp = osc_ldlm_completion_ast;
992 einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
993 einfo->ei_cb_wg = osc_ldlm_weigh_ast;
994 einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
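/* ei_cbdata ends up in dlmlock->l_ast_data, which is how the blocking,
 * completion and glimpse ASTs above find their osc_lock again (see
 * osc_ast_data_get() and the `data' argument of osc_dlm_blocking_ast0()). */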
998 * Determine if the lock should be converted into a lockless lock.
1001 * - if the lock has an explicit requirement for a non-lockless lock;
1002 * - the io lock request type (ci_lockreq);
1003 * - send the enqueue rpc to ost to make the further decision;
1004 * - special treatment for lockless truncate
1006 * Additional policy can be implemented here, e.g., never do lockless-io
1007 * for large extents.
1009 static void osc_lock_to_lockless(const struct lu_env *env,
1010 struct osc_lock *ols, int force)
1012 struct cl_lock_slice *slice = &ols->ols_cl;
1014 LASSERT(ols->ols_state == OLS_NEW ||
1015 ols->ols_state == OLS_UPCALL_RECEIVED);
1018 ols->ols_locklessable = 1;
1019 slice->cls_ops = &osc_lock_lockless_ops;
1021 struct osc_io *oio = osc_env_io(env);
1022 struct cl_io *io = oio->oi_cl.cis_io;
1023 struct cl_object *obj = slice->cls_obj;
1024 struct osc_object *oob = cl2osc(obj);
1025 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1026 struct obd_connect_data *ocd;
1028 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1029 io->ci_lockreq == CILR_MAYBE ||
1030 io->ci_lockreq == CILR_NEVER);
1032 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1033 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1034 (io->ci_lockreq == CILR_MAYBE) &&
1035 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1036 if (io->ci_lockreq == CILR_NEVER ||
1038 (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1039 /* lockless truncate */
1040 (cl_io_is_trunc(io) &&
1041 (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1042 osd->od_lockless_truncate)) {
1043 ols->ols_locklessable = 1;
1044 slice->cls_ops = &osc_lock_lockless_ops;
1047 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1050 static int osc_lock_compatible(const struct osc_lock *qing,
1051 const struct osc_lock *qed)
1053 enum cl_lock_mode qing_mode;
1054 enum cl_lock_mode qed_mode;
1056 qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1057 if (qed->ols_glimpse &&
1058 (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1061 qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1062 return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
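/*
 * In other words: two read locks are always compatible, and a glimpse lock
 * that has already received its upcall (or is being compared against a read
 * enqueue) is treated as compatible as well.
 */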
1066 * Cancel all conflicting locks and wait for them to be destroyed.
1068 * This function is used for two purposes:
1070 * - early cancel all conflicting locks before starting IO, and
1072 * - guarantee that pages added to the page cache by lockless IO are never
1073 * covered by locks other than lockless IO lock, and, hence, are not
1074 * visible to other threads.
1076 static int osc_lock_enqueue_wait(const struct lu_env *env,
1077 const struct osc_lock *olck)
1079 struct cl_lock *lock = olck->ols_cl.cls_lock;
1080 struct cl_lock_descr *descr = &lock->cll_descr;
1081 struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
1082 struct cl_lock *scan;
1083 struct cl_lock *conflict= NULL;
1084 int lockless = osc_lock_is_lockless(olck);
1088 LASSERT(cl_lock_is_mutexed(lock));
1090 /* make it enqueue anyway for glimpse lock, because we actually
1091 * don't need to cancel any conflicting locks. */
1092 if (olck->ols_glimpse)
1095 spin_lock(&hdr->coh_lock_guard);
1096 cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1097 struct cl_lock_descr *cld = &scan->cll_descr;
1098 const struct osc_lock *scan_ols;
1103 if (scan->cll_state < CLS_QUEUING ||
1104 scan->cll_state == CLS_FREEING ||
1105 cld->cld_start > descr->cld_end ||
1106 cld->cld_end < descr->cld_start)
1109 /* overlapped and living locks. */
1111 /* We're not supposed to give up group lock. */
1112 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1113 LASSERT(descr->cld_mode != CLM_GROUP ||
1114 descr->cld_gid != scan->cll_descr.cld_gid);
1118 scan_ols = osc_lock_at(scan);
1120 /* We need to cancel the compatible locks if we're enqueuing
1121 * a lockless lock, for example:
1122 * imagine that client has PR lock on [0, 1000], and thread T0
1123 * is doing lockless IO in [500, 1500] region. Concurrent
1124 * thread T1 can see lockless data in [500, 1000], which is
1125 * wrong, because these data are possibly stale. */
1126 if (!lockless && osc_lock_compatible(olck, scan_ols))
1129 cl_lock_get_trust(scan);
1133 spin_unlock(&hdr->coh_lock_guard);
1136 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1137 /* we want a group lock but a previous lock request
1138 * conflicts, we do not wait but return 0 so the
1139 * request is sent to the server
1141 CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1142 "with %p, no wait, send to server\n",
1144 cl_lock_put(env, conflict);
1147 CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1150 LASSERT(lock->cll_conflict == NULL);
1151 lu_ref_add(&conflict->cll_reference, "cancel-wait",
1153 lock->cll_conflict = conflict;
1161 * Implementation of cl_lock_operations::clo_enqueue() method for osc
1162 * layer. This initiates ldlm enqueue:
1164 * - cancels conflicting locks early (osc_lock_enqueue_wait());
1166 * - calls osc_enqueue_base() to do actual enqueue.
1168 * osc_enqueue_base() is supplied with an upcall function that is executed
1169 * when lock is received either after a local cached ldlm lock is matched, or
1170 * when a reply from the server is received.
1172 * This function does not wait for the network communication to complete.
1174 static int osc_lock_enqueue(const struct lu_env *env,
1175 const struct cl_lock_slice *slice,
1176 struct cl_io *unused, __u32 enqflags)
1178 struct osc_lock *ols = cl2osc_lock(slice);
1179 struct cl_lock *lock = ols->ols_cl.cls_lock;
1183 LASSERT(cl_lock_is_mutexed(lock));
1184 LASSERTF(ols->ols_state == OLS_NEW,
1185 "Impossible state: %d\n", ols->ols_state);
1187 LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1188 "lock = %p, ols = %p\n", lock, ols);
1190 result = osc_lock_enqueue_wait(env, ols);
1192 if (!osc_lock_is_lockless(ols)) {
1193 struct osc_object *obj = cl2osc(slice->cls_obj);
1194 struct osc_thread_info *info = osc_env_info(env);
1195 struct ldlm_res_id *resname = &info->oti_resname;
1196 ldlm_policy_data_t *policy = &info->oti_policy;
1197 struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1199 /* lock will be passed as upcall cookie,
1200 * hold a ref to prevent it from being released. */
1201 cl_lock_hold_add(env, lock, "upcall", lock);
1202 /* a user for lock also */
1203 cl_lock_user_add(env, lock);
1204 ols->ols_state = OLS_ENQUEUED;
1207 * XXX: this is a possible blocking point as
1208 * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1211 osc_lock_build_res(env, obj, resname);
1212 osc_lock_build_policy(env, lock, policy);
1213 result = osc_enqueue_base(osc_export(obj), resname,
1214 &ols->ols_flags, policy,
1216 obj->oo_oinfo->loi_kms_valid,
1218 ols, einfo, &ols->ols_handle,
1219 PTLRPCD_SET, 1, ols->ols_agl);
1221 cl_lock_user_del(env, lock);
1222 cl_lock_unhold(env, lock, "upcall", lock);
1223 if (unlikely(result == -ECANCELED)) {
1224 ols->ols_state = OLS_NEW;
1229 ols->ols_state = OLS_GRANTED;
1230 ols->ols_owner = osc_env_io(env);
1233 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1237 static int osc_lock_wait(const struct lu_env *env,
1238 const struct cl_lock_slice *slice)
1240 struct osc_lock *olck = cl2osc_lock(slice);
1241 struct cl_lock *lock = olck->ols_cl.cls_lock;
1243 LINVRNT(osc_lock_invariant(olck));
1245 if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1246 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1248 } else if (olck->ols_agl) {
1249 if (lock->cll_flags & CLF_FROM_UPCALL)
1250 /* It is from enqueue RPC reply upcall for
1251 * updating state. Do not re-enqueue. */
1254 olck->ols_state = OLS_NEW;
1256 LASSERT(lock->cll_error);
1257 return lock->cll_error;
1261 if (olck->ols_state == OLS_NEW) {
1264 LASSERT(olck->ols_agl);
1266 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1270 return CLO_REENQUEUED;
1273 LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1274 lock->cll_error == 0, olck->ols_lock != NULL));
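/* an error, if any, wins below; otherwise keep waiting (CLO_WAIT) until
 * the lock reaches at least OLS_GRANTED */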
1276 return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1280 * An implementation of cl_lock_operations::clo_use() method that pins cached
1283 static int osc_lock_use(const struct lu_env *env,
1284 const struct cl_lock_slice *slice)
1286 struct osc_lock *olck = cl2osc_lock(slice);
1289 LASSERT(!olck->ols_hold);
1292 * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1293 * flag is not set. This protects us from a concurrent blocking ast.
1295 rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1298 olck->ols_state = OLS_GRANTED;
1300 struct cl_lock *lock;
1303 * Lock is being cancelled somewhere within
1304 * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1305 * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1308 lock = slice->cls_lock;
1309 LASSERT(lock->cll_state == CLS_INTRANSIT);
1310 LASSERT(lock->cll_users > 0);
1311 /* set a flag for osc_dlm_blocking_ast0() to signal the
1313 olck->ols_ast_wait = 1;
1319 static int osc_lock_flush(struct osc_lock *ols, int discard)
1321 struct cl_lock *lock = ols->ols_cl.cls_lock;
1322 struct cl_env_nest nest;
1327 env = cl_env_nested_get(&nest);
1329 struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
1330 struct cl_lock_descr *descr = &lock->cll_descr;
1333 if (descr->cld_mode >= CLM_WRITE) {
1334 result = osc_cache_writeback_range(env, obj,
1335 descr->cld_start, descr->cld_end,
1337 LDLM_DEBUG(ols->ols_lock,
1338 "lock %p: %d pages were %s.\n", lock, result,
1339 discard ? "discarded" : "written");
1344 rc = cl_lock_discard_pages(env, lock);
1345 if (result == 0 && rc < 0)
1348 cl_env_nested_put(&nest, env);
1350 result = PTR_ERR(env);
1353 LINVRNT(!osc_lock_has_pages(ols));
1359 * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1360 * called (as part of cl_lock_cancel()) when lock is canceled either voluntarily
1361 * (LRU pressure, early cancellation, umount, etc.) or due to a conflict
1362 * with some other lock somewhere in the cluster. This function does the
1365 * - invalidates all pages protected by this lock (after sending dirty
1366 * ones to the server, as necessary);
1368 * - decref's underlying ldlm lock;
1370 * - cancels ldlm lock (ldlm_cli_cancel()).
1372 static void osc_lock_cancel(const struct lu_env *env,
1373 const struct cl_lock_slice *slice)
1375 struct cl_lock *lock = slice->cls_lock;
1376 struct osc_lock *olck = cl2osc_lock(slice);
1377 struct ldlm_lock *dlmlock = olck->ols_lock;
1381 LASSERT(cl_lock_is_mutexed(lock));
1382 LINVRNT(osc_lock_invariant(olck));
1384 if (dlmlock != NULL) {
1387 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1388 if (olck->ols_state >= OLS_GRANTED)
1389 result = osc_lock_flush(olck, discard);
1390 osc_lock_unhold(olck);
1392 lock_res_and_lock(dlmlock);
1393 /* Now that we're the only user of dlm read/write reference,
1394 * mostly the ->l_readers + ->l_writers should be zero.
1395 * However, there is a corner case.
1396 * See bug 18829 for details.*/
1397 do_cancel = (dlmlock->l_readers == 0 &&
1398 dlmlock->l_writers == 0);
1399 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1400 unlock_res_and_lock(dlmlock);
1402 result = ldlm_cli_cancel(&olck->ols_handle);
1404 CL_LOCK_DEBUG(D_ERROR, env, lock,
1405 "lock %p cancel failure with error(%d)\n",
1408 olck->ols_state = OLS_CANCELLED;
1409 olck->ols_flags &= ~LDLM_FL_LVB_READY;
1410 osc_lock_detach(env, olck);
1413 #ifdef INVARIANT_CHECK
1414 static int check_cb(const struct lu_env *env, struct cl_io *io,
1415 struct cl_page *page, void *cbdata)
1417 struct cl_lock *lock = cbdata;
1419 if (lock->cll_descr.cld_mode == CLM_READ) {
1420 struct cl_lock *tmp;
1421 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1424 cl_lock_put(env, tmp);
1425 return CLP_GANG_OKAY;
1429 CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1430 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1431 return CLP_GANG_ABORT;
1435 * Returns true iff there are pages under \a olck not protected by other
1438 static int osc_lock_has_pages(struct osc_lock *olck)
1440 struct cl_lock *lock;
1441 struct cl_lock_descr *descr;
1442 struct cl_object *obj;
1443 struct osc_object *oob;
1444 struct cl_env_nest nest;
1449 env = cl_env_nested_get(&nest);
1453 obj = olck->ols_cl.cls_obj;
1455 io = &oob->oo_debug_io;
1456 lock = olck->ols_cl.cls_lock;
1457 descr = &lock->cll_descr;
1459 mutex_lock(&oob->oo_debug_mutex);
1461 io->ci_obj = cl_object_top(obj);
1462 io->ci_ignore_layout = 1;
1463 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1465 result = cl_page_gang_lookup(env, obj, io,
1466 descr->cld_start, descr->cld_end,
1467 check_cb, (void *)lock);
1468 if (result == CLP_GANG_ABORT)
1470 if (result == CLP_GANG_RESCHED)
1472 } while (result != CLP_GANG_OKAY);
1473 cl_io_fini(env, io);
1474 mutex_unlock(&oob->oo_debug_mutex);
1475 cl_env_nested_put(&nest, env);
1477 return (result == CLP_GANG_ABORT);
1480 static int osc_lock_has_pages(struct osc_lock *olck)
1484 #endif /* INVARIANT_CHECK */
1486 static void osc_lock_delete(const struct lu_env *env,
1487 const struct cl_lock_slice *slice)
1489 struct osc_lock *olck;
1491 olck = cl2osc_lock(slice);
1492 if (olck->ols_glimpse) {
1493 LASSERT(!olck->ols_hold);
1494 LASSERT(!olck->ols_lock);
1498 LINVRNT(osc_lock_invariant(olck));
1499 LINVRNT(!osc_lock_has_pages(olck));
1501 osc_lock_unhold(olck);
1502 osc_lock_detach(env, olck);
1506 * Implements cl_lock_operations::clo_state() method for osc layer.
1508 * Maintains osc_lock::ols_owner field.
1510 * This assumes that lock always enters CLS_HELD (from some other state) in
1511 * the same IO context as one that requested the lock. This should not be a
1512 * problem, because context is by definition shared by all activity pertaining
1513 * to the same high-level IO.
1515 static void osc_lock_state(const struct lu_env *env,
1516 const struct cl_lock_slice *slice,
1517 enum cl_lock_state state)
1519 struct osc_lock *lock = cl2osc_lock(slice);
1522 * XXX multiple io contexts can use the lock at the same time.
1524 LINVRNT(osc_lock_invariant(lock));
1525 if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1526 struct osc_io *oio = osc_env_io(env);
1528 LASSERT(lock->ols_owner == NULL);
1529 lock->ols_owner = oio;
1530 } else if (state != CLS_HELD)
1531 lock->ols_owner = NULL;
1534 static int osc_lock_print(const struct lu_env *env, void *cookie,
1535 lu_printer_t p, const struct cl_lock_slice *slice)
1537 struct osc_lock *lock = cl2osc_lock(slice);
1540 * XXX print ldlm lock and einfo properly.
1542 (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
1543 lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1544 lock->ols_state, lock->ols_owner);
1545 osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1549 static int osc_lock_fits_into(const struct lu_env *env,
1550 const struct cl_lock_slice *slice,
1551 const struct cl_lock_descr *need,
1552 const struct cl_io *io)
1554 struct osc_lock *ols = cl2osc_lock(slice);
1556 if (need->cld_enq_flags & CEF_NEVER)
1559 if (ols->ols_state >= OLS_CANCELLED)
1562 if (need->cld_mode == CLM_PHANTOM) {
1564 return !(ols->ols_state > OLS_RELEASED);
1567 * Note: the QUEUED lock can't be matched here, otherwise
1568 * it might cause deadlocks.
1570 * P1: enqueued read lock, create sublock1
1571 * P2: enqueued write lock, create sublock2(conflicted
1573 * P1: Grant read lock.
1574 * P1: enqueued glimpse lock(with holding sublock1_read),
1575 * matched with sublock2, waiting sublock2 to be granted.
1576 * But sublock2 can not be granted, because P1
1577 * will not release sublock1. Bang!
1579 if (ols->ols_state < OLS_GRANTED ||
1580 ols->ols_state > OLS_RELEASED)
1582 } else if (need->cld_enq_flags & CEF_MUST) {
1584 * If the lock hasn't ever been enqueued, it can't be matched,
1585 * because the enqueue process brings in a lot of information
1586 * which can be used to determine things such as lockless,
1589 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1590 ols->ols_locklessable)
1596 static const struct cl_lock_operations osc_lock_ops = {
1597 .clo_fini = osc_lock_fini,
1598 .clo_enqueue = osc_lock_enqueue,
1599 .clo_wait = osc_lock_wait,
1600 .clo_unuse = osc_lock_unuse,
1601 .clo_use = osc_lock_use,
1602 .clo_delete = osc_lock_delete,
1603 .clo_state = osc_lock_state,
1604 .clo_cancel = osc_lock_cancel,
1605 .clo_weigh = osc_lock_weigh,
1606 .clo_print = osc_lock_print,
1607 .clo_fits_into = osc_lock_fits_into,
1610 static int osc_lock_lockless_unuse(const struct lu_env *env,
1611 const struct cl_lock_slice *slice)
1613 struct osc_lock *ols = cl2osc_lock(slice);
1614 struct cl_lock *lock = slice->cls_lock;
1616 LASSERT(ols->ols_state == OLS_GRANTED);
1617 LINVRNT(osc_lock_invariant(ols));
1619 cl_lock_cancel(env, lock);
1620 cl_lock_delete(env, lock);
1624 static void osc_lock_lockless_cancel(const struct lu_env *env,
1625 const struct cl_lock_slice *slice)
1627 struct osc_lock *ols = cl2osc_lock(slice);
1630 result = osc_lock_flush(ols, 0);
1632 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1634 ols->ols_state = OLS_CANCELLED;
1637 static int osc_lock_lockless_wait(const struct lu_env *env,
1638 const struct cl_lock_slice *slice)
1640 struct osc_lock *olck = cl2osc_lock(slice);
1641 struct cl_lock *lock = olck->ols_cl.cls_lock;
1643 LINVRNT(osc_lock_invariant(olck));
1644 LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1646 return lock->cll_error;
1649 static void osc_lock_lockless_state(const struct lu_env *env,
1650 const struct cl_lock_slice *slice,
1651 enum cl_lock_state state)
1653 struct osc_lock *lock = cl2osc_lock(slice);
1655 LINVRNT(osc_lock_invariant(lock));
1656 if (state == CLS_HELD) {
1657 struct osc_io *oio = osc_env_io(env);
1659 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1660 lock->ols_owner = oio;
1662 /* set the io to be lockless if this lock is for io's
1664 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1665 oio->oi_lockless = 1;
1669 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1670 const struct cl_lock_slice *slice,
1671 const struct cl_lock_descr *need,
1672 const struct cl_io *io)
1674 struct osc_lock *lock = cl2osc_lock(slice);
1676 if (!(need->cld_enq_flags & CEF_NEVER))
1679 /* lockless lock should only be used by its owning io. b22147 */
1680 return (lock->ols_owner == osc_env_io(env));
1683 static const struct cl_lock_operations osc_lock_lockless_ops = {
1684 .clo_fini = osc_lock_fini,
1685 .clo_enqueue = osc_lock_enqueue,
1686 .clo_wait = osc_lock_lockless_wait,
1687 .clo_unuse = osc_lock_lockless_unuse,
1688 .clo_state = osc_lock_lockless_state,
1689 .clo_fits_into = osc_lock_lockless_fits_into,
1690 .clo_cancel = osc_lock_lockless_cancel,
1691 .clo_print = osc_lock_print
1694 int osc_lock_init(const struct lu_env *env,
1695 struct cl_object *obj, struct cl_lock *lock,
1696 const struct cl_io *unused)
1698 struct osc_lock *clk;
1701 OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1703 __u32 enqflags = lock->cll_descr.cld_enq_flags;
1705 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1706 cfs_atomic_set(&clk->ols_pageref, 0);
1707 clk->ols_state = OLS_NEW;
1709 clk->ols_flags = osc_enq2ldlm_flags(enqflags);
1710 clk->ols_agl = !!(enqflags & CEF_AGL);
1712 clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1713 if (clk->ols_flags & LDLM_FL_HAS_INTENT)
1714 clk->ols_glimpse = 1;
1716 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1718 if (!(enqflags & CEF_MUST))
1719 /* try to convert this lock to a lockless lock */
1720 osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
1721 if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1722 clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1724 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
1725 lock, clk, clk->ols_flags);
1733 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1735 struct osc_lock *olock;
1738 spin_lock(&osc_ast_guard);
1739 olock = dlm->l_ast_data;
1741 * there's a very rare race with osc_page_addref_lock(), but that
1742 * doesn't matter because in the worst case we don't cancel a lock
1743 * which we actually could; that does no harm.
1745 if (olock != NULL &&
1746 cfs_atomic_add_return(_PAGEREF_MAGIC,
1747 &olock->ols_pageref) != _PAGEREF_MAGIC) {
1748 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1751 spin_unlock(&osc_ast_guard);