/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif
/* fid_build_reg_res_name() */
#include <lustre_fid.h>

#include "osc_cl_internal.h"
/*****************************************************************************
 *
 * Type definitions.
 *
 */
static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
                                 struct osc_lock *ols, int force);
static int osc_lock_has_pages(struct osc_lock *olck);

int osc_lock_is_lockless(const struct osc_lock *olck)
{
        return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
}
/**
 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
 * pointer cannot be dereferenced, as lock is not protected from concurrent
 * reclaim. This function is a helper for osc_lock_invariant().
 */
static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(handle);
        if (lock != NULL)
                LDLM_LOCK_PUT(lock);
        return lock;
}
/**
 * Invariant that has to be true all of the time.
 */
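/* Note: ergo(a, b) below reads as "a implies b", i.e. !(a) || (b). */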
static int osc_lock_invariant(struct osc_lock *ols)
{
        struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
        struct ldlm_lock *olock       = ols->ols_lock;
        int               handle_used = lustre_handle_is_used(&ols->ols_handle);

        return
                ergo(osc_lock_is_lockless(ols),
                     ols->ols_locklessable && ols->ols_lock == NULL) ||
                (ergo(olock != NULL, handle_used) &&
                 ergo(olock != NULL,
                      olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
                 /*
                  * Check that ->ols_handle and ->ols_lock are consistent, but
                  * take into account that they are set at different times.
                  */
                 ergo(handle_used,
                      ergo(lock != NULL && olock != NULL, lock == olock) &&
                      ergo(lock == NULL, olock == NULL)) &&
                 ergo(ols->ols_state == OLS_CANCELLED,
                      olock == NULL && !handle_used) &&
                 /*
                  * DLM lock is destroyed only after we have seen cancellation
                  * AST.
                  */
                 ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
                      !olock->l_destroyed) &&
                 ergo(ols->ols_state == OLS_GRANTED,
                      olock != NULL &&
                      olock->l_req_mode == olock->l_granted_mode &&
                      ols->ols_hold));
}
125 /*****************************************************************************
/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        /* reset the osc lock's state because it might be queued again. */
        olck->ols_state = OLS_NEW;
        spin_lock(&osc_ast_guard);
        dlmlock = olck->ols_lock;
        if (dlmlock == NULL) {
                spin_unlock(&osc_ast_guard);
                return;
        }

        olck->ols_lock = NULL;
        /* wb(); --- for all who check (ols->ols_lock != NULL) before
         * call to osc_lock_detach() */
        dlmlock->l_ast_data = NULL;
        olck->ols_handle.cookie = 0ULL;
        spin_unlock(&osc_ast_guard);
        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                struct cl_object *obj = olck->ols_cl.cls_obj;
                struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
                __u64 old_kms         = cl2osc(obj)->oo_oinfo->loi_kms;

                /* Update the KMS. Need to loop over all granted locks.
                 * Not a problem for the client */
                attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
                unlock_res_and_lock(dlmlock);

                cl_object_attr_lock(obj);
                cl_object_attr_set(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
        } else
                unlock_res_and_lock(dlmlock);

        /* release a reference taken in osc_lock_upcall0(). */
        LASSERT(olck->ols_has_ref);
        lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
        LDLM_LOCK_RELEASE(dlmlock);
        olck->ols_has_ref = 0;
}
178 static int osc_lock_unhold(struct osc_lock *ols)
184 result = osc_cancel_base(&ols->ols_handle,
185 ols->ols_einfo.ei_mode);
190 static int osc_lock_unuse(const struct lu_env *env,
191 const struct cl_lock_slice *slice)
193 struct osc_lock *ols = cl2osc_lock(slice);
195 LASSERT(ols->ols_state == OLS_GRANTED ||
196 ols->ols_state == OLS_UPCALL_RECEIVED);
197 LINVRNT(osc_lock_invariant(ols));
199 if (ols->ols_glimpse) {
200 LASSERT(ols->ols_hold == 0);
203 LASSERT(ols->ols_hold);
        /*
         * Move the lock into OLS_RELEASED state before calling
         * osc_cancel_base() so that possible synchronous cancellation (that
         * always happens e.g., for liblustre) sees that the lock is released.
         */
210 ols->ols_state = OLS_RELEASED;
211 return osc_lock_unhold(ols);
214 static void osc_lock_fini(const struct lu_env *env,
215 struct cl_lock_slice *slice)
217 struct osc_lock *ols = cl2osc_lock(slice);
219 LINVRNT(osc_lock_invariant(ols));
        /*
         * ->ols_hold can still be true at this point if, for example, a
         * thread that requested a lock was killed (and released a reference
         * to the lock) before the reply from the server was received. In this
         * case the lock is destroyed immediately after the upcall.
         */
226 osc_lock_unhold(ols);
227 LASSERT(ols->ols_lock == NULL);
229 OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
                        struct ldlm_res_id *resname)
{
        const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);

        if (0) {
                /*
                 * In the perfect world of the future, where ost servers talk
                 * FIDs:
                 */
                fid_build_reg_res_name(fid, resname);
        } else {
                /*
                 * In reality, the ost server expects ->lsm_object_id and
                 * ->lsm_object_gr in the resource name.
                 */
                osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
                                   resname);
        }
}
static void osc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
                                  ldlm_policy_data_t *policy)
{
        const struct cl_lock_descr *d = &lock->cll_descr;

        osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
        policy->l_extent.gid = d->cld_gid;
}

static int osc_enq2ldlm_flags(__u32 enqflags)
{
        int result = 0;

        LASSERT((enqflags & ~CEF_MASK) == 0);

        if (enqflags & CEF_NONBLOCK)
                result |= LDLM_FL_BLOCK_NOWAIT;
        if (enqflags & CEF_ASYNC)
                result |= LDLM_FL_HAS_INTENT;
        if (enqflags & CEF_DISCARD_DATA)
                result |= LDLM_AST_DISCARD_DATA;
        return result;
}
/**
 * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
 * pointers. Initialized in osc_init().
 */
spinlock_t osc_ast_guard;
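/*
 * Locking order: when both are needed, the ldlm resource lock
 * (lock_res_and_lock()) is taken before osc_ast_guard; see
 * osc_ast_data_get() and osc_lock_upcall0().
 */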
283 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
285 struct osc_lock *olck;
287 lock_res_and_lock(dlm_lock);
288 spin_lock(&osc_ast_guard);
289 olck = dlm_lock->l_ast_data;
291 struct cl_lock *lock = olck->ols_cl.cls_lock;
293 * If osc_lock holds a reference on ldlm lock, return it even
294 * when cl_lock is in CLS_FREEING state. This way
296 * osc_ast_data_get(dlmlock) == NULL
298 * guarantees that all osc references on dlmlock were
299 * released. osc_dlm_blocking_ast0() relies on that.
301 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
302 cl_lock_get_trust(lock);
303 lu_ref_add_atomic(&lock->cll_reference,
304 "ast", cfs_current());
308 spin_unlock(&osc_ast_guard);
309 unlock_res_and_lock(dlm_lock);
313 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
315 struct cl_lock *lock;
317 lock = olck->ols_cl.cls_lock;
318 lu_ref_del(&lock->cll_reference, "ast", cfs_current());
319 cl_lock_put(env, lock);
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
332 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
336 struct cl_object *obj;
337 struct lov_oinfo *oinfo;
338 struct cl_attr *attr;
343 if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
348 lvb = &olck->ols_lvb;
349 obj = olck->ols_cl.cls_obj;
350 oinfo = cl2osc(obj)->oo_oinfo;
351 attr = &osc_env_info(env)->oti_attr;
352 valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
353 cl_lvb2attr(attr, lvb);
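        /* attr now carries the size/blocks/[amc]time values from the LVB;
         * they are applied to the object below under cl_object_attr_lock(). */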
355 cl_object_attr_lock(obj);
357 struct ldlm_lock *dlmlock;
360 dlmlock = olck->ols_lock;
361 LASSERT(dlmlock != NULL);
363 /* re-grab LVB from a dlm lock under DLM spin-locks. */
364 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
365 size = lvb->lvb_size;
                /* Extend KMS up to the end of this lock and no further.
                 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
368 if (size > dlmlock->l_policy_data.l_extent.end)
369 size = dlmlock->l_policy_data.l_extent.end + 1;
370 if (size >= oinfo->loi_kms) {
371 LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
372 ", kms="LPU64, lvb->lvb_size, size);
374 attr->cat_kms = size;
376 LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
377 LPU64"; leaving kms="LPU64", end="LPU64,
378 lvb->lvb_size, oinfo->loi_kms,
379 dlmlock->l_policy_data.l_extent.end);
381 ldlm_lock_allow_match_locked(dlmlock);
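                /* The lock's LVB is valid now: ldlm_lock_allow_match_locked()
                 * sets LDLM_FL_LVB_READY so that ldlm_lock_match() can find
                 * this lock. */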
382 } else if (rc == -ENAVAIL && olck->ols_glimpse) {
383 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
384 " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
389 cl_object_attr_set(env, obj, attr, valid);
391 cl_object_attr_unlock(obj);
/**
 * Called when a lock is granted, from an upcall (when server returned a
 * granted lock), or from completion AST, when server returned a blocked lock.
 *
 * Called under lock and resource spin-locks, that are released temporarily
 * here.
 */
403 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
404 struct ldlm_lock *dlmlock, int rc)
406 struct ldlm_extent *ext;
407 struct cl_lock *lock;
408 struct cl_lock_descr *descr;
410 LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
413 if (olck->ols_state != OLS_GRANTED) {
414 lock = olck->ols_cl.cls_lock;
415 ext = &dlmlock->l_policy_data.l_extent;
416 descr = &osc_env_info(env)->oti_descr;
417 descr->cld_obj = lock->cll_descr.cld_obj;
419 /* XXX check that ->l_granted_mode is valid. */
420 descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
421 descr->cld_start = cl_index(descr->cld_obj, ext->start);
422 descr->cld_end = cl_index(descr->cld_obj, ext->end);
423 descr->cld_gid = ext->gid;
                /*
                 * tell upper layers the extent of the lock that was actually
                 * granted.
                 */
428 olck->ols_state = OLS_GRANTED;
429 osc_lock_lvb_update(env, olck, rc);
                /* release DLM spin-locks to allow cl_lock_{modify,signal}()
                 * to take a semaphore on a parent lock. This is safe, because
                 * spin-locks are needed to protect consistency of
                 * dlmlock->l_*_mode and LVB, and we have finished processing
                 * them. */
436 unlock_res_and_lock(dlmlock);
437 cl_lock_modify(env, lock, descr);
438 cl_lock_signal(env, lock);
439 LINVRNT(osc_lock_invariant(olck));
440 lock_res_and_lock(dlmlock);
445 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
448 struct ldlm_lock *dlmlock;
452 dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
453 LASSERT(dlmlock != NULL);
455 lock_res_and_lock(dlmlock);
456 spin_lock(&osc_ast_guard);
457 LASSERT(dlmlock->l_ast_data == olck);
458 LASSERT(olck->ols_lock == NULL);
459 olck->ols_lock = dlmlock;
460 spin_unlock(&osc_ast_guard);
        /*
         * Lock might not be granted yet. In this case, the completion AST
         * (osc_ldlm_completion_ast()) comes later and finishes lock
         * granting.
         */
467 if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
468 osc_lock_granted(env, olck, dlmlock, 0);
469 unlock_res_and_lock(dlmlock);
        /*
         * osc_enqueue_interpret() decrefs asynchronous locks, counter
         * this.
         */
475 ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
478 /* lock reference taken by ldlm_handle2lock_long() is owned by
479 * osc_lock and released in osc_lock_detach() */
480 lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
481 olck->ols_has_ref = 1;
/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
489 static int osc_lock_upcall(void *cookie, int errcode)
491 struct osc_lock *olck = cookie;
492 struct cl_lock_slice *slice = &olck->ols_cl;
493 struct cl_lock *lock = slice->cls_lock;
495 struct cl_env_nest nest;
498 env = cl_env_nested_get(&nest);
502 cl_lock_mutex_get(env, lock);
504 LASSERT(lock->cll_state >= CLS_QUEUING);
505 if (olck->ols_state == OLS_ENQUEUED) {
506 olck->ols_state = OLS_UPCALL_RECEIVED;
507 rc = ldlm_error2errno(errcode);
508 } else if (olck->ols_state == OLS_CANCELLED) {
511 CERROR("Impossible state: %i\n", olck->ols_state);
515 struct ldlm_lock *dlmlock;
517 dlmlock = ldlm_handle2lock(&olck->ols_handle);
518 if (dlmlock != NULL) {
519 lock_res_and_lock(dlmlock);
520 spin_lock(&osc_ast_guard);
521 LASSERT(olck->ols_lock == NULL);
522 dlmlock->l_ast_data = NULL;
523 olck->ols_handle.cookie = 0ULL;
524 spin_unlock(&osc_ast_guard);
525 unlock_res_and_lock(dlmlock);
526 LDLM_LOCK_PUT(dlmlock);
529 if (olck->ols_glimpse)
530 olck->ols_glimpse = 0;
531 osc_lock_upcall0(env, olck);
534 /* Error handling, some errors are tolerable. */
535 if (olck->ols_locklessable && rc == -EUSERS) {
                /* This is a tolerable error, turn this lock into a
                 * lockless lock. */
539 osc_object_set_contended(cl2osc(slice->cls_obj));
540 LASSERT(slice->cls_ops == &osc_lock_ops);
542 /* Change this lock to ldlmlock-less lock. */
543 osc_lock_to_lockless(env, olck, 1);
544 olck->ols_state = OLS_GRANTED;
546 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
547 osc_lock_lvb_update(env, olck, rc);
548 cl_lock_delete(env, lock);
549 /* Hide the error. */
554 /* on error, lock was signaled by cl_lock_error() */
555 cl_lock_signal(env, lock);
557 cl_lock_error(env, lock, rc);
559 cl_lock_mutex_put(env, lock);
561 /* release cookie reference, acquired by osc_lock_enqueue() */
562 lu_ref_del(&lock->cll_reference, "upcall", lock);
563 cl_lock_put(env, lock);
564 cl_env_nested_put(&nest, env);
566 /* should never happen, similar to osc_ldlm_blocking_ast(). */
/**
 * Core of osc_dlm_blocking_ast() logic.
 */
574 static void osc_lock_blocking(const struct lu_env *env,
575 struct ldlm_lock *dlmlock,
576 struct osc_lock *olck, int blocking)
578 struct cl_lock *lock = olck->ols_cl.cls_lock;
580 LASSERT(olck->ols_lock == dlmlock);
581 CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
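        /* The ordering asserted above matters for the check below: a lock
         * that is already OLS_CANCELLED must not be moved back to
         * OLS_BLOCKED. */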
582 LASSERT(!osc_lock_is_lockless(olck));
        /*
         * Lock might still be addref-ed here, if e.g. a blocking ast
         * is sent for a failed lock.
         */
588 osc_lock_unhold(olck);
590 if (blocking && olck->ols_state < OLS_BLOCKED)
                /*
                 * Move osc_lock into OLS_BLOCKED before canceling the lock,
                 * because it recursively re-enters osc_lock_blocking(), with
                 * the state set to OLS_CANCELLED.
                 */
                olck->ols_state = OLS_BLOCKED;
        /*
         * cancel and destroy lock at least once no matter how blocking ast is
         * entered (see comment above osc_ldlm_blocking_ast() for use
         * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
         */
602 cl_lock_cancel(env, lock);
603 cl_lock_delete(env, lock);
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
610 static int osc_dlm_blocking_ast0(const struct lu_env *env,
611 struct ldlm_lock *dlmlock,
612 void *data, int flag)
614 struct osc_lock *olck;
615 struct cl_lock *lock;
619 LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
622 olck = osc_ast_data_get(dlmlock);
624 lock = olck->ols_cl.cls_lock;
625 cl_lock_mutex_get(env, lock);
626 LINVRNT(osc_lock_invariant(olck));
627 if (olck->ols_ast_wait) {
628 /* wake up osc_lock_use() */
629 cl_lock_signal(env, lock);
630 olck->ols_ast_wait = 0;
                /*
                 * Lock might have been canceled while this thread was
                 * sleeping on the lock mutex, but olck is pinned in memory.
                 */
636 if (olck == dlmlock->l_ast_data) {
                        /*
                         * NOTE: DLM sends blocking AST's for failed locks
                         *       (that are still in pre-OLS_GRANTED state)
                         *       too, and they have to be canceled otherwise
                         *       DLM lock is never destroyed and stuck in
                         *       memory.
                         *
                         *       Alternatively, ldlm_cli_cancel() can be
                         *       called here directly for osc_locks with
                         *       ols_state < OLS_GRANTED to maintain an
                         *       invariant that ->clo_cancel() is only called
                         *       for locks that were granted.
                         */
650 LASSERT(data == olck);
651 osc_lock_blocking(env, dlmlock,
652 olck, flag == LDLM_CB_BLOCKING);
655 cl_lock_mutex_put(env, lock);
656 osc_ast_data_put(env, olck);
659 * DLM lock exists, but there is no cl_lock attached to it.
660 * This is a `normal' race. cl_object and its cl_lock's can be
661 * removed by memory pressure, together with all pages.
663 cancel = (flag == LDLM_CB_BLOCKING);
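                /* With no cl_lock attached, a blocking AST still requires us
                 * to cancel the DLM lock below; for a cancellation AST there
                 * is nothing left to do. */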
666 struct lustre_handle *lockh;
668 lockh = &osc_env_info(env)->oti_handle;
669 ldlm_lock2handle(dlmlock, lockh);
670 result = ldlm_cli_cancel(lockh);
/**
 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
 * some other lock, or is canceled. This function is installed as a
 * ldlm_lock::l_blocking_ast() for client extent locks.
 *
 * Control flow is tricky, because ldlm uses the same call-back
 * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
 *
 * \param dlmlock lock for which ast occurred.
 *
 * \param new description of a conflicting lock in case of blocking ast.
 *
 * \param data value of dlmlock->l_ast_data
 *
 * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *             cancellation and blocking ast's.
 *
 * Possible use cases:
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
 *       a lock due to lock lru pressure, or an explicit user request to purge
 *       locks.
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
 *       us that dlmlock conflicts with another lock that some client is
 *       enqueuing. Lock is canceled.
 *
 *     - cl_lock_cancel() is called. osc_lock_cancel() calls
 *       ldlm_cli_cancel() that calls
 *
 *           dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 *
 *       recursively entering osc_ldlm_blocking_ast().
 *
 *     - client cancels lock voluntarily (e.g., as a part of early
 *       cancellation):
 *
 *           cl_lock_cancel()->
 *             osc_lock_cancel()->
 *               ldlm_cli_cancel()->
 *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 */
718 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
719 struct ldlm_lock_desc *new, void *data,
723 struct cl_env_nest nest;
        /*
         * This can be called in the context of outer IO, e.g.,
         *
         *         ->osc_enqueue_base()->...
         *           ->ldlm_prep_elc_req()->...
         *             ->ldlm_cancel_callback()->...
         *               ->osc_ldlm_blocking_ast()
         *
         * A new environment has to be created to not corrupt outer context.
         */
737 env = cl_env_nested_get(&nest);
739 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
740 cl_env_nested_put(&nest, env);
742 result = PTR_ERR(env);
                /*
                 * XXX This should never happen, as cl_lock is
                 * stuck. Pre-allocated environment a la vvp_inode_fini_env
                 * should be used here.
                 */
751 if (result == -ENODATA)
754 CERROR("BAST failed: %d\n", result);
759 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
760 int flags, void *data)
762 struct cl_env_nest nest;
764 struct osc_lock *olck;
765 struct cl_lock *lock;
769 /* first, do dlm part of the work */
770 dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
771 /* then, notify cl_lock */
772 env = cl_env_nested_get(&nest);
774 olck = osc_ast_data_get(dlmlock);
776 lock = olck->ols_cl.cls_lock;
777 cl_lock_mutex_get(env, lock);
779 * ldlm_handle_cp_callback() copied LVB from request
780 * to lock->l_lvb_data, store it in osc_lock.
782 LASSERT(dlmlock->l_lvb_data != NULL);
783 lock_res_and_lock(dlmlock);
784 olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
785 if (olck->ols_lock == NULL) {
787 * upcall (osc_lock_upcall()) hasn't yet been
788 * called. Do nothing now, upcall will bind
789 * olck to dlmlock and signal the waiters.
791 * This maintains an invariant that osc_lock
792 * and ldlm_lock are always bound when
793 * osc_lock is in OLS_GRANTED state.
795 } else if (dlmlock->l_granted_mode ==
796 dlmlock->l_req_mode) {
797 osc_lock_granted(env, olck, dlmlock, dlmrc);
799 unlock_res_and_lock(dlmlock);
802 CL_LOCK_DEBUG(D_ERROR, env, lock,
803 "dlmlock returned %d\n", dlmrc);
804 cl_lock_error(env, lock, dlmrc);
806 cl_lock_mutex_put(env, lock);
807 osc_ast_data_put(env, olck);
810 result = -ELDLM_NO_LOCK_DATA;
811 cl_env_nested_put(&nest, env);
813 result = PTR_ERR(env);
814 return dlmrc ?: result;
817 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
819 struct ptlrpc_request *req = data;
820 struct osc_lock *olck;
821 struct cl_lock *lock;
822 struct cl_object *obj;
823 struct cl_env_nest nest;
826 struct req_capsule *cap;
829 LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
831 env = cl_env_nested_get(&nest);
                /*
                 * osc_ast_data_get() has to go after environment is
                 * allocated, because osc_ast_data() acquires a
                 * reference to a lock, and it can only be released in
                 * environment.
                 */
841 lock = olck->ols_cl.cls_lock;
842 cl_lock_mutex_get(env, lock);
844 req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
845 req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
847 result = req_capsule_server_pack(cap);
849 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
850 obj = lock->cll_descr.cld_obj;
851 result = cl_object_glimpse(env, obj, lvb);
853 cl_lock_mutex_put(env, lock);
854 osc_ast_data_put(env, olck);
                /*
                 * These errors are normal races, so we don't want to
                 * fill the console with messages by calling
                 * ptlrpc_error().
                 */
861 lustre_pack_reply(req, 1, NULL, NULL);
862 result = -ELDLM_NO_LOCK_DATA;
864 cl_env_nested_put(&nest, env);
866 result = PTR_ERR(env);
867 req->rq_status = result;
static unsigned long osc_lock_weigh(const struct lu_env *env,
                                    const struct cl_lock_slice *slice)
{
        /*
         * don't need to grab coh_page_guard since we don't care about the
         * exact number of pages.
         */
        return cl_object_header(slice->cls_obj)->coh_pages;
}
/**
 * Get the weight of dlm lock for early cancellation.
 *
 * XXX: it should return the pages covered by this \a dlmlock.
 */
886 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
888 struct cl_env_nest nest;
890 struct osc_lock *lock;
892 unsigned long weight;
        /*
         * osc_ldlm_weigh_ast has a complex context since it might be called
         * because of lock canceling, or from user's input. We have to make
         * a new environment for it. Probably it is implementation safe to use
         * the upper context because cl_lock_put() doesn't modify environment
         * variables. But just in case ..
         */
903 env = cl_env_nested_get(&nest);
                /* Mostly because of lack of memory, tend to eliminate this lock */
908 LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
909 lock = osc_ast_data_get(dlmlock);
                /* cl_lock was destroyed because of memory pressure.
                 * It is much more reasonable to assign this type of lock
                 * a lower weight.
                 */
                GOTO(out, weight = 0);
918 cll = lock->ols_cl.cls_lock;
919 cl_lock_mutex_get(env, cll);
920 weight = cl_lock_weigh(env, cll);
921 cl_lock_mutex_put(env, cll);
922 osc_ast_data_put(env, lock);
926 cl_env_nested_put(&nest, env);
930 static void osc_lock_build_einfo(const struct lu_env *env,
931 const struct cl_lock *clock,
932 struct osc_lock *lock,
933 struct ldlm_enqueue_info *einfo)
935 enum cl_lock_mode mode;
937 mode = clock->cll_descr.cld_mode;
        if (mode == CLM_PHANTOM)
                /*
                 * For now, enqueue all glimpse locks in read mode. In the
                 * future, client might choose to enqueue LCK_PW lock for
                 * glimpse on a file opened for write.
                 */
                mode = CLM_READ;
946 einfo->ei_type = LDLM_EXTENT;
947 einfo->ei_mode = osc_cl_lock2ldlm(mode);
948 einfo->ei_cb_bl = osc_ldlm_blocking_ast;
949 einfo->ei_cb_cp = osc_ldlm_completion_ast;
950 einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
951 einfo->ei_cb_wg = osc_ldlm_weigh_ast;
952 einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
955 static int osc_lock_delete0(struct cl_lock *conflict)
957 struct cl_env_nest nest;
961 env = cl_env_nested_get(&nest);
963 cl_lock_delete(env, conflict);
964 cl_env_nested_put(&nest, env);
/**
 * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
 * is called as a part of enqueuing to cancel conflicting locks early.
 *
 * \retval 0: success, \a conflict was cancelled and destroyed.
 *
 * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
 *                     released in the process. Repeat enqueuing.
 *
 * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
 *                       either \a lock is non-blocking, or the current thread
 *                       holds other locks that prevent it from waiting
 *                       for the cancel to complete.
 *
 * \retval -ve: other error, including -EINTR.
 */
986 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
987 struct cl_lock *conflict, int canwait)
991 LASSERT(cl_lock_is_mutexed(lock));
992 LASSERT(cl_lock_is_mutexed(conflict));
995 if (conflict->cll_state != CLS_FREEING) {
996 cl_lock_cancel(env, conflict);
997 rc = osc_lock_delete0(conflict);
1000 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
1002 if (cl_lock_nr_mutexed(env) > 2)
                        /*
                         * If mutexes of locks other than @lock and
                         * @scan are held by the current thread, it
                         * cannot wait on @scan state change in a
                         * deadlock-safe manner, so simply skip early
                         * cancellation in this case.
                         *
                         * This means that early cancellation doesn't
                         * work when there is even slight mutex
                         * contention, as top-lock's mutex is usually
                         * held at this time.
                         */
1017 /* Waiting for @scan to be destroyed */
1018 cl_lock_mutex_put(env, lock);
1020 rc = cl_lock_state_wait(env, conflict);
1022 conflict->cll_state < CLS_FREEING);
1023 /* mutex was released, repeat enqueue. */
1024 rc = rc ?: CLO_REPEAT;
1025 cl_lock_mutex_get(env, lock);
1028 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
1029 CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
1030 conflict, rc ? "not":"", rc);
/**
 * Determine if the lock should be converted into a lockless lock.
 *
 * - if the lock has an explicit requirement for a non-lockless lock;
 * - if the io lock request type ci_lockreq allows it;
 * - send the enqueue rpc to ost to make the further decision;
 * - special treatment for truncate lockless lock.
 *
 * Additional policy can be implemented here, e.g., never do lockless-io
 * for large extents.
 */
1047 static void osc_lock_to_lockless(const struct lu_env *env,
1048 struct osc_lock *ols, int force)
1050 struct cl_lock_slice *slice = &ols->ols_cl;
1051 struct cl_lock *lock = slice->cls_lock;
1053 LASSERT(ols->ols_state == OLS_NEW ||
1054 ols->ols_state == OLS_UPCALL_RECEIVED);
1057 ols->ols_locklessable = 1;
1058 LASSERT(cl_lock_is_mutexed(lock));
1059 slice->cls_ops = &osc_lock_lockless_ops;
1061 struct osc_io *oio = osc_env_io(env);
1062 struct cl_io *io = oio->oi_cl.cis_io;
1063 struct cl_object *obj = slice->cls_obj;
1064 struct osc_object *oob = cl2osc(obj);
1065 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1066 struct obd_connect_data *ocd;
1068 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1069 io->ci_lockreq == CILR_MAYBE ||
1070 io->ci_lockreq == CILR_NEVER);
1072 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1073 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1074 (io->ci_lockreq == CILR_MAYBE) &&
1075 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1076 if (io->ci_lockreq == CILR_NEVER ||
1078 (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1079 /* lockless truncate */
1080 (io->ci_type == CIT_TRUNC &&
1081 (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1082 osd->od_lockless_truncate)) {
1083 ols->ols_locklessable = 1;
1084 slice->cls_ops = &osc_lock_lockless_ops;
1087 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
/**
 * Cancel all conflicting locks and wait for them to be destroyed.
 *
 * This function is used for two purposes:
 *
 *     - early cancel all conflicting locks before starting IO, and
 *
 *     - guarantee that pages added to the page cache by lockless IO are never
 *       covered by locks other than lockless IO lock, and, hence, are not
 *       visible to other threads.
 */
1101 static int osc_lock_enqueue_wait(const struct lu_env *env,
1102 const struct osc_lock *olck)
1104 struct cl_lock *lock = olck->ols_cl.cls_lock;
1105 struct cl_lock_descr *descr = &lock->cll_descr;
1106 struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
1107 struct cl_lock_closure *closure = &osc_env_info(env)->oti_closure;
1108 struct cl_lock *scan;
1109 struct cl_lock *temp;
1110 int lockless = osc_lock_is_lockless(olck);
1116 LASSERT(cl_lock_is_mutexed(lock));
1117 LASSERT(lock->cll_state == CLS_QUEUING);
1120 * XXX This function could be sped up if we had asynchronous
1125 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1126 cl_lock_nr_mutexed(env) == 1;
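        /* We may sleep waiting for a conflicting lock to be destroyed only
         * if this enqueue is allowed to block and no other lock mutex is
         * held by the current thread. */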
1127 cl_lock_closure_init(env, closure, lock, canwait);
1128 spin_lock(&hdr->coh_lock_guard);
1129 list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1133 if (scan->cll_state < CLS_QUEUING ||
1134 scan->cll_state == CLS_FREEING ||
1135 scan->cll_descr.cld_start > descr->cld_end ||
1136 scan->cll_descr.cld_end < descr->cld_start)
1139 /* overlapped and living locks. */
1141 /* We're not supposed to give up group lock. */
1142 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1143 LASSERT(descr->cld_mode != CLM_GROUP ||
1144 descr->cld_gid != scan->cll_descr.cld_gid);
1148 /* A tricky case for lockless pages:
1149 * We need to cancel the compatible locks if we're enqueuing
1150 * a lockless lock, for example:
1151 * imagine that client has PR lock on [0, 1000], and thread T0
1152 * is doing lockless IO in [500, 1500] region. Concurrent
1153 * thread T1 can see lockless data in [500, 1000], which is
1154 * wrong, because these data are possibly stale.
1156 if (!lockless && cl_lock_compatible(scan, lock))
                /* Now @scan is conflicting with @lock, this means the current
                 * thread has to sleep until @scan is destroyed. */
1161 cl_lock_get_trust(scan);
1162 if (&temp->cll_linkage != &hdr->coh_locks)
1163 cl_lock_get_trust(temp);
1164 spin_unlock(&hdr->coh_lock_guard);
1165 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1167 LASSERT(list_empty(&closure->clc_list));
1168 rc = cl_lock_closure_build(env, scan, closure);
1170 rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1171 cl_lock_disclosure(env, closure);
1172 if (rc == -EWOULDBLOCK)
1175 if (rc == CLO_REPEAT && !canwait)
1176 /* cannot wait... no early cancellation. */
1179 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1180 cl_lock_put(env, scan);
1181 spin_lock(&hdr->coh_lock_guard);
1183 * Lock list could have been modified, while spin-lock was
1184 * released. Check that it is safe to continue.
1186 stop = list_empty(&temp->cll_linkage);
1187 if (&temp->cll_linkage != &hdr->coh_locks)
1188 cl_lock_put(env, temp);
1189 if (stop || rc != 0)
1192 spin_unlock(&hdr->coh_lock_guard);
1193 cl_lock_closure_fini(closure);
/**
 * Deadlock avoidance for osc_lock_enqueue(). Consider the following scenario:
 *
 *     - Thread0: obtains PR:[0, 10]. Lock is busy.
 *
 *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
 *       PR:[0, 10], but cancellation of the busy lock is postponed.
 *
 *     - Thread0: enqueues PR:[30, 40]. Lock is locally matched to
 *       PW:[5, 50], and thread0 waits for the lock completion never
 *       releasing PR:[0, 10]---deadlock.
 *
 * The second PR lock can be a glimpse (it is to deal with that situation that
 * ll_glimpse_size() has a second argument, preventing local match of
 * not-yet-granted locks, see bug 10295). A similar situation is possible in
 * the case of a memory mapped user level buffer.
 *
 * To prevent this we can detect a situation when current "thread" or "io"
 * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
 * the ols->ols_flags, or prevent local match with PW locks.
 */
1218 static int osc_deadlock_is_possible(const struct lu_env *env,
1219 struct cl_lock *lock)
1221 struct cl_object *obj;
1222 struct cl_object_header *head;
1223 struct cl_lock *scan;
1230 LASSERT(cl_lock_is_mutexed(lock));
1232 oio = osc_env_io(env);
1233 obj = lock->cll_descr.cld_obj;
1234 head = cl_object_header(obj);
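        /* Scan all locks on this object: if one of them is already owned by
         * the current IO context, local match could lead to the deadlock
         * described above, so the caller adds LDLM_FL_BLOCK_GRANTED. */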
1237 spin_lock(&head->coh_lock_guard);
1238 list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1240 struct osc_lock *oscan;
1242 oscan = osc_lock_at(scan);
1243 LASSERT(oscan != NULL);
1244 if (oscan->ols_owner == oio) {
1250 spin_unlock(&head->coh_lock_guard);
/**
 * Implementation of cl_lock_operations::clo_enqueue() method for osc
 * layer. This initiates ldlm enqueue:
 *
 *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
 *
 *     - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 *     - calls osc_enqueue_base() to do actual enqueue.
 *
 * osc_enqueue_base() is supplied with an upcall function that is executed
 * when lock is received either after a local cached ldlm lock is matched, or
 * when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
1270 static int osc_lock_enqueue(const struct lu_env *env,
1271 const struct cl_lock_slice *slice,
1272 struct cl_io *unused, __u32 enqflags)
1274 struct osc_lock *ols = cl2osc_lock(slice);
1275 struct cl_lock *lock = ols->ols_cl.cls_lock;
1276 struct osc_object *obj = cl2osc(slice->cls_obj);
1277 struct osc_thread_info *info = osc_env_info(env);
1278 struct ldlm_res_id *resname = &info->oti_resname;
1279 ldlm_policy_data_t *policy = &info->oti_policy;
1280 struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1284 LASSERT(cl_lock_is_mutexed(lock));
1285 LASSERT(lock->cll_state == CLS_QUEUING);
1286 LASSERT(ols->ols_state == OLS_NEW);
1288 osc_lock_build_res(env, obj, resname);
1289 osc_lock_build_policy(env, lock, policy);
1290 ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1291 if (osc_deadlock_is_possible(env, lock))
1292 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1293 if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1294 ols->ols_glimpse = 1;
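        /* An intent enqueue (LDLM_FL_HAS_INTENT, set from CEF_ASYNC in
         * osc_enq2ldlm_flags()) is treated as a glimpse request. */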
1296 result = osc_lock_enqueue_wait(env, ols);
1298 if (!(enqflags & CEF_MUST))
1299 /* try to convert this lock to a lockless lock */
1300 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1301 if (!osc_lock_is_lockless(ols)) {
1302 if (ols->ols_locklessable)
1303 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
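                        /* If the resource turns out to be contended, the
                         * server may refuse this lock (-EUSERS) and
                         * osc_lock_upcall() will fall back to lockless IO. */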
1305 /* a reference for lock, passed as an upcall cookie */
1307 lu_ref_add(&lock->cll_reference, "upcall", lock);
1308 ols->ols_state = OLS_ENQUEUED;
1311 * XXX: this is possible blocking point as
1312 * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1315 result = osc_enqueue_base(osc_export(obj), resname,
1316 &ols->ols_flags, policy,
1318 obj->oo_oinfo->loi_kms_valid,
1320 ols, einfo, &ols->ols_handle,
1323 lu_ref_del(&lock->cll_reference,
1325 cl_lock_put(env, lock);
1328 ols->ols_state = OLS_GRANTED;
1331 LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1335 static int osc_lock_wait(const struct lu_env *env,
1336 const struct cl_lock_slice *slice)
1338 struct osc_lock *olck = cl2osc_lock(slice);
1339 struct cl_lock *lock = olck->ols_cl.cls_lock;
1341 LINVRNT(osc_lock_invariant(olck));
1342 if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1345 LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1346 lock->cll_error == 0, olck->ols_lock != NULL));
1348 return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
/**
 * An implementation of cl_lock_operations::clo_use() method that pins cached
 * lock.
 */
1355 static int osc_lock_use(const struct lu_env *env,
1356 const struct cl_lock_slice *slice)
1358 struct osc_lock *olck = cl2osc_lock(slice);
1361 LASSERT(!olck->ols_hold);
1364 * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1365 * flag is not set. This protects us from a concurrent blocking ast.
1367 rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1370 olck->ols_state = OLS_GRANTED;
1372 struct cl_lock *lock;
                /*
                 * Lock is being cancelled somewhere within
                 * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
                 * set, but osc_ldlm_blocking_ast() hasn't yet acquired
                 * cl_lock mutex.
                 */
1381 LASSERT(lock->cll_state == CLS_INTRANSIT);
1382 LASSERT(lock->cll_users > 0);
                /* set a flag for osc_dlm_blocking_ast0() to signal the
                 * lock. */
1385 olck->ols_ast_wait = 1;
1391 static int osc_lock_flush(struct osc_lock *ols, int discard)
1393 struct cl_lock *lock = ols->ols_cl.cls_lock;
1394 struct cl_env_nest nest;
1398 env = cl_env_nested_get(&nest);
1400 result = cl_lock_page_out(env, lock, discard);
1401 cl_env_nested_put(&nest, env);
1403 result = PTR_ERR(env);
1406 LINVRNT(!osc_lock_has_pages(ols));
/**
 * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
 * called (as part of cl_lock_cancel()) when lock is canceled either
 * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
 * conflict with some other lock somewhere in the cluster. This function does
 * the following:
 *
 *     - invalidates all pages protected by this lock (after sending dirty
 *       ones to the server, as necessary);
 *
 *     - decref's underlying ldlm lock;
 *
 *     - cancels ldlm lock (ldlm_cli_cancel()).
 */
1425 static void osc_lock_cancel(const struct lu_env *env,
1426 const struct cl_lock_slice *slice)
1428 struct cl_lock *lock = slice->cls_lock;
1429 struct osc_lock *olck = cl2osc_lock(slice);
1430 struct ldlm_lock *dlmlock = olck->ols_lock;
1434 LASSERT(cl_lock_is_mutexed(lock));
1435 LINVRNT(osc_lock_invariant(olck));
1437 if (dlmlock != NULL) {
1440 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1441 result = osc_lock_flush(olck, discard);
1442 osc_lock_unhold(olck);
1444 lock_res_and_lock(dlmlock);
1445 /* Now that we're the only user of dlm read/write reference,
1446 * mostly the ->l_readers + ->l_writers should be zero.
1447 * However, there is a corner case.
1448 * See bug 18829 for details.*/
1449 do_cancel = (dlmlock->l_readers == 0 &&
1450 dlmlock->l_writers == 0);
1451 dlmlock->l_flags |= LDLM_FL_CBPENDING;
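                /* LDLM_FL_CBPENDING ensures the lock is cancelled once the
                 * last reader/writer reference is dropped, even if it cannot
                 * be cancelled right now. */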
1452 unlock_res_and_lock(dlmlock);
1454 result = ldlm_cli_cancel(&olck->ols_handle);
1456 CL_LOCK_DEBUG(D_ERROR, env, lock,
1457 "lock %p cancel failure with error(%d)\n",
1460 olck->ols_state = OLS_CANCELLED;
1461 osc_lock_detach(env, olck);
1464 void cl_lock_page_list_fixup(const struct lu_env *env,
1465 struct cl_io *io, struct cl_lock *lock,
1466 struct cl_page_list *queue);
1468 #ifdef INVARIANT_CHECK
/**
 * Returns true iff there are pages under \a olck not protected by other
 * locks.
 */
1473 static int osc_lock_has_pages(struct osc_lock *olck)
1475 struct cl_lock *lock;
1476 struct cl_lock_descr *descr;
1477 struct cl_object *obj;
1478 struct osc_object *oob;
1479 struct cl_page_list *plist;
1480 struct cl_page *page;
1481 struct cl_env_nest nest;
1486 env = cl_env_nested_get(&nest);
1488 obj = olck->ols_cl.cls_obj;
1490 io = &oob->oo_debug_io;
1491 lock = olck->ols_cl.cls_lock;
1492 descr = &lock->cll_descr;
1493 plist = &osc_env_info(env)->oti_plist;
1494 cl_page_list_init(plist);
1496 mutex_lock(&oob->oo_debug_mutex);
1498 io->ci_obj = cl_object_top(obj);
1499 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1500 cl_page_gang_lookup(env, obj, io,
1501 descr->cld_start, descr->cld_end, plist, 0);
1502 cl_lock_page_list_fixup(env, io, lock, plist);
1503 if (plist->pl_nr > 0) {
1504 CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1505 cl_page_list_for_each(page, plist)
1506 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1508 result = plist->pl_nr > 0;
1509 cl_page_list_disown(env, io, plist);
1510 cl_page_list_fini(env, plist);
1511 cl_io_fini(env, io);
1512 mutex_unlock(&oob->oo_debug_mutex);
1513 cl_env_nested_put(&nest, env);
1519 static int osc_lock_has_pages(struct osc_lock *olck)
1523 #endif /* INVARIANT_CHECK */
1525 static void osc_lock_delete(const struct lu_env *env,
1526 const struct cl_lock_slice *slice)
1528 struct osc_lock *olck;
1530 olck = cl2osc_lock(slice);
1531 if (olck->ols_glimpse) {
1532 LASSERT(!olck->ols_hold);
1533 LASSERT(!olck->ols_lock);
1537 LINVRNT(osc_lock_invariant(olck));
1538 LINVRNT(!osc_lock_has_pages(olck));
1540 osc_lock_unhold(olck);
1541 osc_lock_detach(env, olck);
/**
 * Implements cl_lock_operations::clo_state() method for osc layer.
 *
 * Maintains osc_lock::ols_owner field.
 *
 * This assumes that lock always enters CLS_HELD (from some other state) in
 * the same IO context as one that requested the lock. This should not be a
 * problem, because context is by definition shared by all activity pertaining
 * to the same high-level IO.
 */
1554 static void osc_lock_state(const struct lu_env *env,
1555 const struct cl_lock_slice *slice,
1556 enum cl_lock_state state)
1558 struct osc_lock *lock = cl2osc_lock(slice);
1559 struct osc_io *oio = osc_env_io(env);
1562 * XXX multiple io contexts can use the lock at the same time.
1564 LINVRNT(osc_lock_invariant(lock));
1565 if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1566 LASSERT(lock->ols_owner == NULL);
1567 lock->ols_owner = oio;
1568 } else if (state != CLS_HELD)
1569 lock->ols_owner = NULL;
1572 static int osc_lock_print(const struct lu_env *env, void *cookie,
1573 lu_printer_t p, const struct cl_lock_slice *slice)
1575 struct osc_lock *lock = cl2osc_lock(slice);
1578 * XXX print ldlm lock and einfo properly.
1580 (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1581 lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1582 lock->ols_state, lock->ols_owner);
1583 osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1587 static int osc_lock_fits_into(const struct lu_env *env,
1588 const struct cl_lock_slice *slice,
1589 const struct cl_lock_descr *need,
1590 const struct cl_io *io)
1592 struct osc_lock *ols = cl2osc_lock(slice);
1594 if (need->cld_enq_flags & CEF_NEVER)
1597 if (need->cld_mode == CLM_PHANTOM) {
                /*
                 * Note: the QUEUED lock can't be matched here, otherwise
                 * it might cause deadlocks.
                 *
                 * P1: enqueued read lock, create sublock1
                 * P2: enqueued write lock, create sublock2 (conflicted
                 *     with sublock1)
                 * P1: Grant read lock.
                 * P1: enqueued glimpse lock (while holding sublock1_read),
                 *     matched with sublock2, waiting for sublock2 to be
                 *     granted. But sublock2 cannot be granted, because P1
                 *     will not release sublock1. Bang!
                 */
1611 if (ols->ols_state < OLS_GRANTED ||
1612 ols->ols_state > OLS_RELEASED)
1614 } else if (need->cld_enq_flags & CEF_MUST) {
                /*
                 * If the lock hasn't ever been enqueued, it can't be matched,
                 * because the enqueue process brings in a lot of information
                 * which can be used to determine things such as lockless
                 * mode, etc.
                 */
1621 if (ols->ols_state < OLS_GRANTED ||
1622 ols->ols_state > OLS_RELEASED)
1624 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1625 ols->ols_locklessable)
1631 static const struct cl_lock_operations osc_lock_ops = {
1632 .clo_fini = osc_lock_fini,
1633 .clo_enqueue = osc_lock_enqueue,
1634 .clo_wait = osc_lock_wait,
1635 .clo_unuse = osc_lock_unuse,
1636 .clo_use = osc_lock_use,
1637 .clo_delete = osc_lock_delete,
1638 .clo_state = osc_lock_state,
1639 .clo_cancel = osc_lock_cancel,
1640 .clo_weigh = osc_lock_weigh,
1641 .clo_print = osc_lock_print,
1642 .clo_fits_into = osc_lock_fits_into,
1645 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1646 const struct cl_lock_slice *slice,
1647 struct cl_io *unused, __u32 enqflags)
1653 static int osc_lock_lockless_unuse(const struct lu_env *env,
1654 const struct cl_lock_slice *slice)
1656 struct osc_lock *ols = cl2osc_lock(slice);
1657 struct cl_lock *lock = slice->cls_lock;
1659 LASSERT(ols->ols_state == OLS_GRANTED);
1660 LINVRNT(osc_lock_invariant(ols));
1662 cl_lock_cancel(env, lock);
1663 cl_lock_delete(env, lock);
1667 static void osc_lock_lockless_cancel(const struct lu_env *env,
1668 const struct cl_lock_slice *slice)
1670 struct osc_lock *ols = cl2osc_lock(slice);
1673 result = osc_lock_flush(ols, 0);
1675 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1677 ols->ols_state = OLS_CANCELLED;
1680 static int osc_lock_lockless_wait(const struct lu_env *env,
1681 const struct cl_lock_slice *slice)
1683 struct osc_lock *olck = cl2osc_lock(slice);
1684 struct cl_lock *lock = olck->ols_cl.cls_lock;
1686 LINVRNT(osc_lock_invariant(olck));
1687 LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1689 return lock->cll_error;
1692 static void osc_lock_lockless_state(const struct lu_env *env,
1693 const struct cl_lock_slice *slice,
1694 enum cl_lock_state state)
1696 struct osc_lock *lock = cl2osc_lock(slice);
1697 struct osc_io *oio = osc_env_io(env);
1699 LINVRNT(osc_lock_invariant(lock));
1700 if (state == CLS_HELD) {
1701 LASSERT(lock->ols_owner == NULL);
1702 lock->ols_owner = oio;
                /* set the io to be lockless if this lock is for io's
                 * host object */
                if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1707 oio->oi_lockless = 1;
1709 lock->ols_owner = NULL;
1712 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1713 const struct cl_lock_slice *slice,
1714 const struct cl_lock_descr *need,
1715 const struct cl_io *io)
1720 static const struct cl_lock_operations osc_lock_lockless_ops = {
1721 .clo_fini = osc_lock_fini,
1722 .clo_enqueue = osc_lock_lockless_enqueue,
1723 .clo_wait = osc_lock_lockless_wait,
1724 .clo_unuse = osc_lock_lockless_unuse,
1725 .clo_state = osc_lock_lockless_state,
1726 .clo_fits_into = osc_lock_lockless_fits_into,
1727 .clo_cancel = osc_lock_lockless_cancel,
1728 .clo_print = osc_lock_print
1731 int osc_lock_init(const struct lu_env *env,
1732 struct cl_object *obj, struct cl_lock *lock,
1733 const struct cl_io *unused)
1735 struct osc_lock *clk;
1738 OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1740 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1741 clk->ols_state = OLS_NEW;
1742 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);