/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/** \addtogroup osc osc @{ */
#define DEBUG_SUBSYSTEM S_OSC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif
/* fid_build_reg_res_name() */
#include <lustre_fid.h>

#include "osc_cl_internal.h"
/*****************************************************************************
 *
 * Type conversions.
 *
 */

static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;

int osc_lock_is_lockless(const struct osc_lock *olck)
{
        return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
}
/**
 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
 * pointer cannot be dereferenced, as lock is not protected from concurrent
 * reclaim. This function is a helper for osc_lock_invariant().
 */
static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(handle);
        if (lock != NULL)
                LDLM_LOCK_PUT(lock);
        return lock;
}
/**
 * Invariant that has to be true all of the time.
 */
static int osc_lock_invariant(struct osc_lock *ols)
{
        struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
        struct ldlm_lock *olock       = ols->ols_lock;
        int               handle_used = lustre_handle_is_used(&ols->ols_handle);

        return
                ergo(osc_lock_is_lockless(ols),
                     ols->ols_locklessable && ols->ols_lock == NULL) ||
                (ergo(olock != NULL, handle_used) &&
                 ergo(olock != NULL,
                      olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
                 /*
                  * Check that ->ols_handle and ->ols_lock are consistent, but
                  * take into account that they are set at different times.
                  */
                 ergo(handle_used,
                      ergo(lock != NULL && olock != NULL, lock == olock) &&
                      ergo(lock == NULL, olock == NULL)) &&
                 ergo(ols->ols_state == OLS_CANCELLED,
                      olock == NULL && !handle_used) &&
                 /*
                  * DLM lock is destroyed only after we have seen cancellation
                  * ast.
                  */
                 ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
                      !olock->l_destroyed) &&
                 ergo(ols->ols_state == OLS_GRANTED,
                      olock != NULL &&
                      olock->l_req_mode == olock->l_granted_mode &&
                      ols->ols_hold));
}
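
/*
 * The invariant above is phrased with the libcfs predicates ergo() and
 * equi(). As a reading aid, their conventional libcfs definitions read
 * roughly as follows (sketch only, kept disabled; see libcfs for the
 * authoritative macros):
 */
#if 0
#define ergo(a, b) (!(a) || (b))    /* logical implication: (a) implies (b) */
#define equi(a, b) (!!(a) == !!(b)) /* both sides have the same truth value */
#endif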
/*****************************************************************************
 *
 * Lock operations.
 *
 */

/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        spin_lock(&osc_ast_guard);
        dlmlock = olck->ols_lock;
        if (dlmlock == NULL) {
                spin_unlock(&osc_ast_guard);
                return;
        }

        olck->ols_lock = NULL;
        /* wb(); --- for everyone who checks (ols->ols_lock != NULL) before
         * calling osc_lock_detach() */
        dlmlock->l_ast_data = NULL;
        olck->ols_handle.cookie = 0ULL;
        spin_unlock(&osc_ast_guard);

        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                struct cl_object *obj  = olck->ols_cl.cls_obj;
                struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
                __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;

                /* Update the kms. Need to loop all granted locks.
                 * Not a problem for the client */
                attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
                unlock_res_and_lock(dlmlock);

                cl_object_attr_lock(obj);
                cl_object_attr_set(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
        } else
                unlock_res_and_lock(dlmlock);

        /* release a reference taken in osc_lock_upcall0(). */
        lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
        LDLM_LOCK_RELEASE(dlmlock);
}
static int osc_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        int result;

        LASSERT(ols->ols_state == OLS_GRANTED ||
                ols->ols_state == OLS_UPCALL_RECEIVED);
        LINVRNT(osc_lock_invariant(ols));

        if (ols->ols_glimpse) {
                LASSERT(ols->ols_hold == 0);
                return 0;
        }
        LASSERT(ols->ols_hold);

        /*
         * Move lock into OLS_RELEASED state before calling osc_cancel_base()
         * so that possible synchronous cancellation (that always happens
         * e.g., for liblustre) sees that lock is released.
         */
        ols->ols_state = OLS_RELEASED;
        ols->ols_hold = 0;
        result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
        ols->ols_has_ref = 0;
        return result;
}
static void osc_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);

        LINVRNT(osc_lock_invariant(ols));
        /*
         * ->ols_hold can still be true at this point if, for example, a
         * thread that requested a lock was killed (and released a reference
         * to the lock) before a reply from the server was received. In this
         * case the lock is destroyed immediately after the upcall.
         */
        if (ols->ols_hold)
                osc_lock_unuse(env, slice);
        if (ols->ols_lock != NULL)
                osc_lock_detach(env, ols);

        OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
                        struct ldlm_res_id *resname)
{
        const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);

        if (0) {
                /*
                 * In the perfect world of the future, where ost servers talk
                 * idif-fids...
                 */
                fid_build_reg_res_name(fid, resname);
        } else {
                /*
                 * In reality, where ost server expects ->lsm_object_id and
                 * ->lsm_object_gr in resname.
                 */
                osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
                                   resname);
        }
}
static void osc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
                                  ldlm_policy_data_t *policy)
{
        const struct cl_lock_descr *d = &lock->cll_descr;

        osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
}
static int osc_enq2ldlm_flags(__u32 enqflags)
{
        int result = 0;

        LASSERT((enqflags & ~(CEF_NONBLOCK|CEF_ASYNC|CEF_DISCARD_DATA)) == 0);

        if (enqflags & CEF_NONBLOCK)
                result |= LDLM_FL_BLOCK_NOWAIT;
        if (enqflags & CEF_ASYNC)
                result |= LDLM_FL_HAS_INTENT;
        if (enqflags & CEF_DISCARD_DATA)
                result |= LDLM_AST_DISCARD_DATA;
        return result;
}
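
/*
 * For example, an asynchronous non-blocking enqueue translates as
 *
 *     osc_enq2ldlm_flags(CEF_NONBLOCK|CEF_ASYNC)
 *         == LDLM_FL_BLOCK_NOWAIT|LDLM_FL_HAS_INTENT,
 *
 * and LDLM_FL_HAS_INTENT is what osc_lock_enqueue() below later uses to
 * classify the lock as a glimpse request.
 */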
/**
 * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
 * pointers. Initialized in osc_init().
 */
spinlock_t osc_ast_guard;
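
/*
 * Discipline followed in this file: ldlm_lock::l_ast_data is bound to a new
 * osc_lock (osc_lock_upcall0()) and looked up (osc_ast_data_get()) with both
 * the resource lock and osc_ast_guard held, while clearing it
 * (osc_lock_detach(), osc_lock_upcall() error path) holds at least
 * osc_ast_guard; since every reader also takes osc_ast_guard, either path is
 * safe.
 */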
static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
{
        struct osc_lock *olck;

        lock_res_and_lock(dlm_lock);
        spin_lock(&osc_ast_guard);
        olck = dlm_lock->l_ast_data;
        if (olck != NULL) {
                struct cl_lock *lock = olck->ols_cl.cls_lock;
                /*
                 * If osc_lock holds a reference on ldlm lock, return it even
                 * when cl_lock is in CLS_FREEING state. This way
                 *
                 *         osc_ast_data_get(dlmlock) == NULL
                 *
                 * guarantees that all osc references on dlmlock were
                 * released. osc_dlm_blocking_ast0() relies on that.
                 */
                if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
                        cl_lock_get_trust(lock);
                        lu_ref_add_atomic(&lock->cll_reference,
                                          "ast", cfs_current());
                } else
                        olck = NULL;
        }
        spin_unlock(&osc_ast_guard);
        unlock_res_and_lock(dlm_lock);
        return olck;
}
static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
{
        struct cl_lock *lock;

        lock = olck->ols_cl.cls_lock;
        lu_ref_del(&lock->cll_reference, "ast", cfs_current());
        cl_lock_put(env, lock);
}
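
/*
 * All AST handlers below follow the same pin/unpin protocol around
 * osc_ast_data_get()/osc_ast_data_put(). A condensed sketch, with a
 * hypothetical handler name, kept disabled as documentation only:
 */
#if 0
static void example_ast_handler(const struct lu_env *env,
                                struct ldlm_lock *dlmlock)
{
        struct osc_lock *olck;

        olck = osc_ast_data_get(dlmlock);
        if (olck != NULL) {
                /* cl_lock is pinned and cannot be reclaimed under us */
                cl_lock_mutex_get(env, olck->ols_cl.cls_lock);
                /* ... inspect or update olck here ... */
                cl_lock_mutex_put(env, olck->ols_cl.cls_lock);
                osc_ast_data_put(env, olck);
        } else {
                /* cl_lock is gone: only bare ldlm_lock operations remain */
        }
}
#endif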
static void osc_lock_to_lockless(struct osc_lock *olck)
{
        struct cl_lock_slice *slice = &olck->ols_cl;
        struct cl_lock       *lock  = slice->cls_lock;

        /*
         * TODO: Discover for which locks we need to convert the lock
         * to a lockless lock.
         */
        LASSERT(cl_lock_is_mutexed(lock));
        slice->cls_ops = &osc_lock_lockless_ops;
}
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 */
static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
                                int rc)
{
        struct ost_lvb   *lvb;
        struct cl_object *obj;
        struct lov_oinfo *oinfo;
        struct cl_attr   *attr;
        unsigned          valid;

        if (!(olck->ols_flags & LDLM_FL_LVB_READY))
                return;

        lvb   = &olck->ols_lvb;
        obj   = olck->ols_cl.cls_obj;
        oinfo = cl2osc(obj)->oo_oinfo;
        attr  = &osc_env_info(env)->oti_attr;
        valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
        cl_lvb2attr(attr, lvb);

        cl_object_attr_lock(obj);
        if (rc == 0) {
                struct ldlm_lock *dlmlock;
                __u64 size;

                dlmlock = olck->ols_lock;
                LASSERT(dlmlock != NULL);

                size = lvb->lvb_size;
                /* Extend KMS up to the end of this lock and no further
                 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
                if (size > dlmlock->l_policy_data.l_extent.end)
                        size = dlmlock->l_policy_data.l_extent.end + 1;
                if (size >= oinfo->loi_kms) {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
                                   ", kms="LPU64, lvb->lvb_size, size);
                        valid |= CAT_KMS;
                        attr->cat_kms = size;
                } else {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
                                   LPU64"; leaving kms="LPU64", end="LPU64,
                                   lvb->lvb_size, oinfo->loi_kms,
                                   dlmlock->l_policy_data.l_extent.end);
                }
                ldlm_lock_allow_match(dlmlock);
        } else if (rc == -ENAVAIL && olck->ols_glimpse) {
                CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
                       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
        } else
                valid = 0;

        if (valid != 0)
                cl_object_attr_set(env, obj, attr, valid);

        cl_object_attr_unlock(obj);
}
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
                             struct ldlm_lock *dlmlock, int rc)
{
        struct ldlm_extent   *ext;
        struct cl_lock       *lock;
        struct cl_lock_descr *descr;

        LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

        if (olck->ols_state != OLS_GRANTED) {
                lock  = olck->ols_cl.cls_lock;
                ext   = &dlmlock->l_policy_data.l_extent;
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_obj = lock->cll_descr.cld_obj;

                /* XXX check that ->l_granted_mode is valid. */
                descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, ext->start);
                descr->cld_end   = cl_index(descr->cld_obj, ext->end);
                /*
                 * tell upper layers the extent of the lock that was actually
                 * granted
                 */
                cl_lock_modify(env, lock, descr);
                LINVRNT(osc_lock_invariant(olck));
                olck->ols_state = OLS_GRANTED;
                osc_lock_lvb_update(env, olck, rc);
                cl_lock_signal(env, lock);
        }
}
static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
        LASSERT(dlmlock != NULL);

        lock_res_and_lock(dlmlock);
        spin_lock(&osc_ast_guard);
        LASSERT(dlmlock->l_ast_data == olck);
        LASSERT(olck->ols_lock == NULL);
        olck->ols_lock = dlmlock;
        spin_unlock(&osc_ast_guard);
        unlock_res_and_lock(dlmlock);

        /*
         * Lock might be not yet granted. In this case, completion ast
         * (osc_ldlm_completion_ast()) comes later and finishes lock
         * granting.
         */
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
                osc_lock_granted(env, olck, dlmlock, 0);
        /*
         * osc_enqueue_interpret() decrefs asynchronous locks, counter
         * this.
         */
        ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
        olck->ols_hold = olck->ols_has_ref = 1;

        /* lock reference taken by ldlm_handle2lock_long() is owned by
         * osc_lock and released in osc_lock_detach() */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
}
/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, int errcode)
{
        struct osc_lock      *olck  = cookie;
        struct cl_lock_slice *slice = &olck->ols_cl;
        struct cl_lock       *lock  = slice->cls_lock;
        struct lu_env        *env;
        int                   refcheck;

        /*
         * XXX environment should be created in ptlrpcd.
         */
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                int rc;

                cl_lock_mutex_get(env, lock);

                LASSERT(lock->cll_state >= CLS_QUEUING);
                if (olck->ols_state == OLS_ENQUEUED) {
                        olck->ols_state = OLS_UPCALL_RECEIVED;
                        rc = ldlm_error2errno(errcode);
                } else if (olck->ols_state == OLS_CANCELLED) {
                        rc = -EIO;
                } else {
                        CERROR("Impossible state: %i\n", olck->ols_state);
                        LBUG();
                }
                if (rc) {
                        struct ldlm_lock *dlmlock;

                        dlmlock = ldlm_handle2lock(&olck->ols_handle);
                        if (dlmlock != NULL) {
                                lock_res_and_lock(dlmlock);
                                spin_lock(&osc_ast_guard);
                                LASSERT(olck->ols_lock == NULL);
                                dlmlock->l_ast_data = NULL;
                                olck->ols_handle.cookie = 0ULL;
                                spin_unlock(&osc_ast_guard);
                                unlock_res_and_lock(dlmlock);
                                LDLM_LOCK_PUT(dlmlock);
                        }
                } else {
                        if (olck->ols_glimpse)
                                olck->ols_glimpse = 0;
                        osc_lock_upcall0(env, olck);
                }

                /* Error handling, some errors are tolerable. */
                if (olck->ols_locklessable && rc == -EUSERS) {
                        /* This is a tolerable error, turn this lock into
                         * lockless lock. */
                        osc_object_set_contended(cl2osc(slice->cls_obj));
                        LASSERT(slice->cls_ops == &osc_lock_ops);

                        /* Change this lock to ldlmlock-less lock. */
                        osc_lock_to_lockless(olck);
                        olck->ols_state = OLS_GRANTED;
                        rc = 0;
                } else if (olck->ols_glimpse && rc == -ENAVAIL) {
                        osc_lock_lvb_update(env, olck, rc);
                        cl_lock_delete(env, lock);
                        /* Hide the error. */
                        rc = 0;
                }

                if (rc == 0)
                        /* on error, lock was signaled by cl_lock_error() */
                        cl_lock_signal(env, lock);
                else
                        cl_lock_error(env, lock, rc);

                cl_lock_mutex_put(env, lock);

                /* release cookie reference, acquired by osc_lock_enqueue() */
                lu_ref_del(&lock->cll_reference, "upcall", lock);
                cl_lock_put(env, lock);
                cl_env_put(env, &refcheck);
        } else
                /* should never happen, similar to osc_ldlm_blocking_ast(). */
                LBUG();
        return errcode;
}
/**
 * Core of osc_dlm_blocking_ast() logic.
 */
static void osc_lock_blocking(const struct lu_env *env,
                              struct ldlm_lock *dlmlock,
                              struct osc_lock *olck, int blocking)
{
        struct cl_lock *lock = olck->ols_cl.cls_lock;

        LASSERT(olck->ols_lock == dlmlock);
        CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
        LASSERT(!osc_lock_is_lockless(olck));

        if (olck->ols_hold)
                /*
                 * Lock might be still addref-ed here, if e.g., blocking ast
                 * is sent for a failed lock.
                 */
                osc_lock_unuse(env, &olck->ols_cl);

        if (blocking && olck->ols_state < OLS_BLOCKED)
                /*
                 * Move osc_lock into OLS_BLOCKED before canceling the lock,
                 * because it recursively re-enters osc_lock_blocking(), with
                 * the state set to OLS_CANCELLED.
                 */
                olck->ols_state = OLS_BLOCKED;
        /*
         * cancel and destroy lock at least once no matter how blocking ast is
         * entered (see comment above osc_ldlm_blocking_ast() for use
         * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
         */
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
}
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
                                 struct ldlm_lock *dlmlock,
                                 void *data, int flag)
{
        struct osc_lock *olck;
        struct cl_lock  *lock;
        int result;
        int cancel;

        LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);

        cancel = 0;
        olck = osc_ast_data_get(dlmlock);
        if (olck != NULL) {
                lock = olck->ols_cl.cls_lock;
                cl_lock_mutex_get(env, lock);
                LINVRNT(osc_lock_invariant(olck));
                if (olck->ols_ast_wait) {
                        /* wake up osc_lock_use() */
                        cl_lock_signal(env, lock);
                        olck->ols_ast_wait = 0;
                }
                /*
                 * Lock might have been canceled while this thread was
                 * sleeping for lock mutex, but olck is pinned in memory.
                 */
                if (olck == dlmlock->l_ast_data) {
                        /*
                         * NOTE: DLM sends blocking AST's for failed locks
                         *       (that are still in pre-OLS_GRANTED state)
                         *       too, and they have to be canceled otherwise
                         *       DLM lock is never destroyed and stuck in
                         *       the memory.
                         *
                         *       Alternatively, ldlm_cli_cancel() can be
                         *       called here directly for osc_locks with
                         *       ols_state < OLS_GRANTED to maintain an
                         *       invariant that ->clo_cancel() is only called
                         *       for locks that were granted.
                         */
                        LASSERT(data == olck);
                        osc_lock_blocking(env, dlmlock,
                                          olck, flag == LDLM_CB_BLOCKING);
                } else
                        cancel = 1;
                cl_lock_mutex_put(env, lock);
                osc_ast_data_put(env, olck);
        } else
                /*
                 * DLM lock exists, but there is no cl_lock attached to it.
                 * This is a `normal' race. cl_object and its cl_lock's can be
                 * removed by memory pressure, together with all pages.
                 */
                cancel = (flag == LDLM_CB_BLOCKING);

        if (cancel) {
                struct lustre_handle *lockh;

                lockh = &osc_env_info(env)->oti_handle;
                ldlm_lock2handle(dlmlock, lockh);
                result = ldlm_cli_cancel(lockh);
        } else
                result = 0;
        return result;
}
/**
 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
 * some other lock, or is canceled. This function is installed as a
 * ldlm_lock::l_blocking_ast() for client extent locks.
 *
 * Control flow is tricky, because ldlm uses the same call-back
 * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
 *
 * \param dlmlock lock for which ast occurred.
 *
 * \param new description of a conflicting lock in case of blocking ast.
 *
 * \param data value of dlmlock->l_ast_data
 *
 * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *             cancellation and blocking ast's.
 *
 * Possible use cases:
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
 *       lock due to lock lru pressure, or explicit user request to purge
 *       locks.
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
 *       us that dlmlock conflicts with another lock that some client is
 *       enqueuing. Lock is canceled.
 *
 *     - cl_lock_cancel() is called. osc_lock_cancel() calls
 *       ldlm_cli_cancel() that calls
 *
 *           dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 *
 *       recursively entering osc_ldlm_blocking_ast().
 *
 *     - client cancels lock voluntarily (e.g., as a part of early
 *       cancellation):
 *
 *           cl_lock_cancel()->
 *             osc_lock_cancel()->
 *               ldlm_cli_cancel()->
 *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 */
static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                                 struct ldlm_lock_desc *new, void *data,
                                 int flag)
{
        struct lu_env      *env;
        struct cl_env_nest  nest;
        int                 result;

        /*
         * This can be called in the context of outer IO, e.g.,
         *
         *     cl_enqueue()->...
         *       ->osc_enqueue_base()->...
         *         ->ldlm_prep_elc_req()->...
         *           ->ldlm_cancel_callback()->...
         *             ->osc_ldlm_blocking_ast()
         *
         * new environment has to be created to not corrupt outer context.
         */
        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
        } else {
                result = PTR_ERR(env);
                /*
                 * XXX This should never happen, as cl_lock is
                 * stuck. Pre-allocated environment a la vvp_inode_fini_env
                 * should be used.
                 */
                LBUG();
        }
        if (result != 0) {
                if (result == -ENODATA)
                        result = 0;
                else
                        CERROR("BAST failed: %d\n", result);
        }
        cl_env_nested_put(&nest, env);
        return result;
}
static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                                   int flags, void *data)
{
        struct lu_env   *env;
        void            *env_cookie;
        struct osc_lock *olck;
        struct cl_lock  *lock;
        int refcheck;
        int result;
        int dlmrc;

        /* first, do dlm part of the work */
        dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
        /* then, notify cl_lock */
        env_cookie = cl_env_reenter();
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                olck = osc_ast_data_get(dlmlock);
                if (olck != NULL) {
                        lock = olck->ols_cl.cls_lock;
                        cl_lock_mutex_get(env, lock);
                        /*
                         * ldlm_handle_cp_callback() copied LVB from request
                         * to lock->l_lvb_data, store it in osc_lock.
                         */
                        LASSERT(dlmlock->l_lvb_data != NULL);
                        olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
                        if (olck->ols_lock == NULL)
                                /*
                                 * upcall (osc_lock_upcall()) hasn't yet been
                                 * called. Do nothing now, upcall will bind
                                 * olck to dlmlock and signal the waiters.
                                 *
                                 * This maintains an invariant that osc_lock
                                 * and ldlm_lock are always bound when
                                 * osc_lock is in OLS_GRANTED state.
                                 */
                                ;
                        else if (dlmlock->l_granted_mode != LCK_MINMODE)
                                osc_lock_granted(env, olck, dlmlock, dlmrc);
                        if (dlmrc != 0)
                                cl_lock_error(env, lock, dlmrc);
                        cl_lock_mutex_put(env, lock);
                        osc_ast_data_put(env, olck);
                        result = 0;
                } else
                        result = -ELDLM_NO_LOCK_DATA;
                cl_env_put(env, &refcheck);
        } else
                result = PTR_ERR(env);
        cl_env_reexit(env_cookie);
        return dlmrc ?: result;
}
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
        struct ptlrpc_request *req = data;
        struct osc_lock       *olck;
        struct cl_lock        *lock;
        struct cl_object      *obj;
        struct lu_env         *env;
        struct ost_lvb        *lvb;
        struct req_capsule    *cap;
        int                    result;
        int                    refcheck;

        LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                /*
                 * osc_ast_data_get() has to go after environment is
                 * allocated, because osc_ast_data_get() acquires a
                 * reference to a lock, and it can only be released in
                 * environment.
                 */
                olck = osc_ast_data_get(dlmlock);
                if (olck != NULL) {
                        lock = olck->ols_cl.cls_lock;
                        cl_lock_mutex_get(env, lock);
                        cap = &req->rq_pill;
                        req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
                        req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
                                             sizeof *lvb);
                        result = req_capsule_server_pack(cap);
                        if (result == 0) {
                                lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
                                obj = lock->cll_descr.cld_obj;
                                result = cl_object_glimpse(env, obj, lvb);
                        }
                        cl_lock_mutex_put(env, lock);
                        osc_ast_data_put(env, olck);
                } else {
                        /*
                         * These errors are normal races, so we don't want to
                         * fill the console with messages by calling
                         * ldlm_lock_dump().
                         */
                        lustre_pack_reply(req, 1, NULL, NULL);
                        result = -ELDLM_NO_LOCK_DATA;
                }
                cl_env_put(env, &refcheck);
        } else
                result = PTR_ERR(env);
        req->rq_status = result;
        return result;
}
static unsigned long osc_lock_weigh(const struct lu_env *env,
                                    const struct cl_lock_slice *slice)
{
        /*
         * don't need to grab coh_page_guard since we don't care about the
         * exact number of pages.
         */
        return cl_object_header(slice->cls_obj)->coh_pages;
}
/**
 * Get the weight of dlm lock for early cancellation.
 *
 * XXX: it should return the pages covered by this \a dlmlock.
 */
static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
        struct lu_env   *env;
        struct osc_lock *lock;
        struct cl_lock  *cll;
        unsigned long    weight;
        void            *cookie;
        int              refcheck;

        cookie = cl_env_reenter();
        /*
         * osc_ldlm_weigh_ast has a complex context since it might be called
         * because of lock canceling, or from user's input. We have to make
         * a new environment for it, although it is probably safe to use the
         * upper context, since cl_lock_put() doesn't modify environment
         * variables; create a fresh one just in case.
         */
        env = cl_env_get(&refcheck);
        if (IS_ERR(env)) {
                /* Mostly because of lack of memory; a zero weight makes this
                 * lock a preferred victim for elimination. */
                cl_env_reexit(cookie);
                return 0;
        }

        LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
        lock = osc_ast_data_get(dlmlock);
        if (lock == NULL) {
                /* cl_lock was destroyed because of memory pressure.
                 * It is reasonable to assign this type of lock a lower
                 * cost. */
                GOTO(out, weight = 0);
        }

        cll = lock->ols_cl.cls_lock;
        cl_lock_mutex_get(env, cll);
        weight = cl_lock_weigh(env, cll);
        cl_lock_mutex_put(env, cll);
        osc_ast_data_put(env, lock);

out:
        cl_env_put(env, &refcheck);
        cl_env_reexit(cookie);
        return weight;
}
static void osc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *clock,
                                 struct osc_lock *lock,
                                 struct ldlm_enqueue_info *einfo)
{
        enum cl_lock_mode mode;

        mode = clock->cll_descr.cld_mode;
        if (mode == CLM_PHANTOM)
                /*
                 * For now, enqueue all glimpse locks in read mode. In the
                 * future, client might choose to enqueue LCK_PW lock for
                 * glimpse on a file opened for write.
                 */
                mode = CLM_READ;

        einfo->ei_type   = LDLM_EXTENT;
        einfo->ei_mode   = osc_cl_lock2ldlm(mode);
        einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
        einfo->ei_cb_cp  = osc_ldlm_completion_ast;
        einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
        einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
        einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
}
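
/*
 * Note that ei_cbdata set above is the value the DLM stores in
 * ldlm_lock::l_ast_data (osc_lock_upcall0() asserts exactly that), and it is
 * what osc_ast_data_get() recovers inside the AST handlers wired in through
 * ei_cb_bl/ei_cb_cp/ei_cb_gl/ei_cb_wg.
 */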
/**
 * Cancels \a conflict lock and waits until it reaches CLS_FREEING state. This
 * is called as a part of enqueuing to cancel conflicting locks early.
 *
 * \retval 0: success, \a conflict was cancelled and destroyed.
 *
 * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
 *                     released in the process. Repeat enqueuing.
 *
 * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
 *                       either \a lock is non-blocking, or the current thread
 *                       holds other locks that prevent it from waiting
 *                       for the cancel to complete.
 *
 * \retval -ve: other error, including -EINTR.
 */
static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
                                struct cl_lock *conflict, int canwait)
{
        int rc;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(cl_lock_is_mutexed(conflict));

        rc = 0;
        if (conflict->cll_state != CLS_FREEING) {
                cl_lock_cancel(env, conflict);
                cl_lock_delete(env, conflict);
                if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
                        rc = -EWOULDBLOCK;
                        if (cl_lock_nr_mutexed(env) > 2)
                                /*
                                 * If mutexes of locks other than @lock and
                                 * @conflict are held by the current thread,
                                 * it cannot wait on @conflict state change
                                 * in a deadlock-safe manner, so simply skip
                                 * early cancellation in this case.
                                 *
                                 * This means that early cancellation doesn't
                                 * work when there is even slight mutex
                                 * contention, as top-lock's mutex is usually
                                 * held at this time.
                                 */
                                ;
                        else if (canwait) {
                                /* Waiting for @conflict to be destroyed */
                                cl_lock_mutex_put(env, lock);
                                do {
                                        rc = cl_lock_state_wait(env, conflict);
                                } while (!rc &&
                                         conflict->cll_state < CLS_FREEING);
                                /* mutex was released, repeat enqueue. */
                                rc = rc ?: CLO_REPEAT;
                                cl_lock_mutex_get(env, lock);
                        }
                }
                LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
                CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
                       conflict, rc ? "not" : "", rc);
        }
        return rc;
}
/**
 * Cancel all conflicting locks and wait for them to be destroyed.
 *
 * This function is used for two purposes:
 *
 *     - early cancel all conflicting locks before starting IO, and
 *
 *     - guarantee that pages added to the page cache by lockless IO are never
 *       covered by locks other than lockless IO lock, and, hence, are not
 *       visible to other threads.
 */
static int osc_lock_enqueue_wait(const struct lu_env *env,
                                 const struct osc_lock *olck)
{
        struct cl_lock          *lock    = olck->ols_cl.cls_lock;
        struct cl_lock_descr    *descr   = &lock->cll_descr;
        struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
        struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
        struct cl_lock          *scan;
        struct cl_lock          *temp;
        int lockless = osc_lock_is_lockless(olck);
        int rc       = 0;
        int canwait;
        int stop;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);

        /*
         * XXX This function could be sped up if we had asynchronous
         * cancellation.
         */
        canwait = !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
                  cl_lock_nr_mutexed(env) == 1;
        cl_lock_closure_init(env, closure, lock, canwait);
        spin_lock(&hdr->coh_lock_guard);
        list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
                if (scan == lock)
                        continue;

                if (scan->cll_state < CLS_QUEUING ||
                    scan->cll_state == CLS_FREEING ||
                    scan->cll_descr.cld_start > descr->cld_end ||
                    scan->cll_descr.cld_end < descr->cld_start)
                        continue;

                /* overlapped and living locks. */

                /* A tricky case for lockless pages:
                 * We need to cancel the compatible locks if we're enqueuing
                 * a lockless lock, for example:
                 * imagine that client has PR lock on [0, 1000], and thread T0
                 * is doing lockless IO in [500, 1500] region. Concurrent
                 * thread T1 can see lockless data in [500, 1000], which is
                 * wrong, because these data are possibly stale. */
                if (!lockless && cl_lock_compatible(scan, lock))
                        continue;

                /* Now @scan conflicts with @lock, so the current thread has
                 * to sleep until @scan is destroyed. */
                cl_lock_get_trust(scan);
                if (&temp->cll_linkage != &hdr->coh_locks)
                        cl_lock_get_trust(temp);
                spin_unlock(&hdr->coh_lock_guard);
                lu_ref_add(&scan->cll_reference, "cancel-wait", lock);

                LASSERT(list_empty(&closure->clc_list));
                rc = cl_lock_closure_build(env, scan, closure);
                if (rc == 0) {
                        rc = osc_lock_cancel_wait(env, lock, scan, canwait);
                        cl_lock_disclosure(env, closure);
                        if (rc == -EWOULDBLOCK)
                                rc = 0;
                }
                if (rc == CLO_REPEAT && !canwait)
                        /* cannot wait... no early cancellation. */
                        rc = 0;

                lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
                cl_lock_put(env, scan);
                spin_lock(&hdr->coh_lock_guard);
                /*
                 * Lock list could have been modified, while spin-lock was
                 * released. Check that it is safe to continue.
                 */
                stop = list_empty(&temp->cll_linkage);
                if (&temp->cll_linkage != &hdr->coh_locks)
                        cl_lock_put(env, temp);
                if (stop || rc != 0)
                        break;
        }
        spin_unlock(&hdr->coh_lock_guard);
        cl_lock_closure_fini(closure);
        return rc;
}
/**
 * Deadlock avoidance for osc_lock_enqueue(). Consider the following scenario:
 *
 *     - Thread0: obtains PR:[0, 10]. Lock is busy.
 *
 *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
 *       PR:[0, 10], but cancellation of the busy lock is postponed.
 *
 *     - Thread0: enqueues PR:[30, 40]. Lock is locally matched to
 *       PW:[5, 50], and thread0 waits for the lock completion never
 *       releasing PR:[0, 10]---deadlock.
 *
 * The second PR lock can be a glimpse (it is to deal with that situation that
 * ll_glimpse_size() has a second argument, preventing local match of
 * not-yet-granted locks, see bug 10295). A similar situation is possible in
 * the case of a memory-mapped user level buffer.
 *
 * To prevent this we can detect a situation when current "thread" or "io"
 * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
 * the ols->ols_flags, or prevent local match with PW locks.
 */
static int osc_deadlock_is_possible(const struct lu_env *env,
                                    struct cl_lock *lock)
{
        struct cl_object        *obj;
        struct cl_object_header *head;
        struct cl_lock          *scan;
        struct osc_io           *oio;
        int result;

        LASSERT(cl_lock_is_mutexed(lock));

        oio  = osc_env_io(env);
        obj  = lock->cll_descr.cld_obj;
        head = cl_object_header(obj);

        result = 0;
        spin_lock(&head->coh_lock_guard);
        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                if (scan != lock) {
                        struct osc_lock *oscan;

                        oscan = osc_lock_at(scan);
                        LASSERT(oscan != NULL);
                        if (oscan->ols_owner == oio) {
                                result = 1;
                                break;
                        }
                }
        }
        spin_unlock(&head->coh_lock_guard);
        return result;
}
/**
 * Implementation of cl_lock_operations::clo_enqueue() method for osc
 * layer. This initiates ldlm enqueue:
 *
 *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
 *
 *     - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 *     - calls osc_enqueue_base() to do actual enqueue.
 *
 * osc_enqueue_base() is supplied with an upcall function that is executed
 * when lock is received either after a local cached ldlm lock is matched, or
 * when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
static int osc_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *_, __u32 enqflags)
{
        struct osc_lock          *ols     = cl2osc_lock(slice);
        struct cl_lock           *lock    = ols->ols_cl.cls_lock;
        struct osc_object        *obj     = cl2osc(slice->cls_obj);
        struct osc_thread_info   *info    = osc_env_info(env);
        struct ldlm_res_id       *resname = &info->oti_resname;
        ldlm_policy_data_t       *policy  = &info->oti_policy;
        struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
        int result;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);
        LASSERT(ols->ols_state == OLS_NEW);

        osc_lock_build_res(env, obj, resname);
        osc_lock_build_policy(env, lock, policy);
        ols->ols_flags = osc_enq2ldlm_flags(enqflags);
        if (ols->ols_locklessable)
                ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
        if (osc_deadlock_is_possible(env, lock))
                ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
        if (ols->ols_flags & LDLM_FL_HAS_INTENT)
                ols->ols_glimpse = 1;

        result = osc_lock_enqueue_wait(env, ols);
        if (result == 0) {
                /* a reference for lock, passed as an upcall cookie */
                cl_lock_get(lock);
                lu_ref_add(&lock->cll_reference, "upcall", lock);
                ols->ols_state = OLS_ENQUEUED;

                /*
                 * XXX: this is possible blocking point as
                 * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
                 * LDLM_FL_LVB_READY to be set.
                 */
                result = osc_enqueue_base(osc_export(obj), resname,
                                          &ols->ols_flags, policy,
                                          &ols->ols_lvb,
                                          obj->oo_oinfo->loi_kms_valid,
                                          osc_lock_upcall,
                                          ols, einfo, &ols->ols_handle,
                                          PTLRPCD_SET, 1);
                if (result != 0) {
                        lu_ref_del(&lock->cll_reference, "upcall", lock);
                        cl_lock_put(env, lock);
                }
        }
        return result;
}
static int osc_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct osc_lock *olck = cl2osc_lock(slice);
        struct cl_lock  *lock = olck->ols_cl.cls_lock;

        LINVRNT(osc_lock_invariant(olck));
        if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
                return 0;

        LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
                     lock->cll_error == 0, olck->ols_lock != NULL));

        /* an error, if any, takes precedence; otherwise 0 once granted,
         * CLO_WAIT until then */
        return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
}
/**
 * An implementation of cl_lock_operations::clo_use() method that pins cached
 * lock.
 */
static int osc_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct osc_lock *olck = cl2osc_lock(slice);
        int rc;

        LASSERT(!olck->ols_hold);
        /*
         * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
         * flag is not set. This protects us from a concurrent blocking ast.
         */
        rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
        if (rc == 0) {
                olck->ols_hold = olck->ols_has_ref = 1;
                olck->ols_state = OLS_GRANTED;
        } else {
                struct cl_lock *lock;

                /*
                 * Lock is being cancelled somewhere within
                 * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
                 * set, but osc_ldlm_blocking_ast() hasn't yet acquired
                 * cl_lock mutex.
                 */
                lock = slice->cls_lock;
                LASSERT(lock->cll_state == CLS_CACHED);
                LASSERT(lock->cll_users > 0);
                LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
                /* set a flag for osc_dlm_blocking_ast0() to signal the
                 * lock. */
                olck->ols_ast_wait = 1;
                rc = CLO_WAIT;
        }
        return rc;
}
static int osc_lock_flush(struct osc_lock *ols, int discard)
{
        struct cl_lock     *lock = ols->ols_cl.cls_lock;
        struct cl_env_nest  nest;
        struct lu_env      *env;
        int result = 0;

        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                result = cl_lock_page_out(env, lock, discard);
                cl_env_nested_put(&nest, env);
        } else
                result = PTR_ERR(env);
        return result;
}
/**
 * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
 * called (as part of cl_lock_cancel()) when lock is canceled either
 * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
 * conflict with some other lock somewhere in the cluster. This function does
 * the following:
 *
 *     - invalidates all pages protected by this lock (after sending dirty
 *       ones to the server, as necessary);
 *
 *     - decref's underlying ldlm lock;
 *
 *     - cancels ldlm lock (ldlm_cli_cancel()).
 */
static void osc_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct cl_lock   *lock    = slice->cls_lock;
        struct osc_lock  *olck    = cl2osc_lock(slice);
        struct ldlm_lock *dlmlock = olck->ols_lock;
        int               result;
        int               discard;

        LASSERT(cl_lock_is_mutexed(lock));
        LINVRNT(osc_lock_invariant(olck));

        if (dlmlock != NULL) {
                discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
                result = osc_lock_flush(olck, discard);
                if (olck->ols_hold)
                        osc_lock_unuse(env, slice);
                LASSERT(dlmlock->l_readers == 0 && dlmlock->l_writers == 0);
                result = ldlm_cli_cancel(&olck->ols_handle);
                if (result < 0)
                        CL_LOCK_DEBUG(D_ERROR, env, lock,
                                      "lock %p cancel failure with error(%d)\n",
                                      lock, result);
        }
        olck->ols_state = OLS_CANCELLED;
        osc_lock_detach(env, olck);
}
void cl_lock_page_list_fixup(const struct lu_env *env,
                             struct cl_io *io, struct cl_lock *lock,
                             struct cl_page_list *queue);
#ifdef INVARIANT_CHECK
/**
 * Returns true iff there are pages under \a olck not protected by other
 * locks.
 */
static int osc_lock_has_pages(struct osc_lock *olck)
{
        struct cl_lock       *lock;
        struct cl_lock_descr *descr;
        struct cl_object     *obj;
        struct osc_object    *oob;
        struct cl_page_list  *plist;
        struct cl_page       *page;
        struct cl_env_nest    nest;
        struct cl_io         *io;
        struct lu_env        *env;
        int                   result;

        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                obj   = olck->ols_cl.cls_obj;
                oob   = cl2osc(obj);
                io    = &oob->oo_debug_io;
                lock  = olck->ols_cl.cls_lock;
                descr = &lock->cll_descr;
                plist = &osc_env_info(env)->oti_plist;
                cl_page_list_init(plist);

                mutex_lock(&oob->oo_debug_mutex);

                io->ci_obj = cl_object_top(obj);
                cl_io_init(env, io, CIT_MISC, io->ci_obj);
                cl_page_gang_lookup(env, obj, io,
                                    descr->cld_start, descr->cld_end, plist);
                cl_lock_page_list_fixup(env, io, lock, plist);
                if (plist->pl_nr > 0) {
                        CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
                        cl_page_list_for_each(page, plist)
                                CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
                }
                result = plist->pl_nr > 0;
                cl_page_list_disown(env, io, plist);
                cl_page_list_fini(env, plist);
                cl_io_fini(env, io);
                mutex_unlock(&oob->oo_debug_mutex);
                cl_env_nested_put(&nest, env);
        } else
                result = 0;
        return result;
}
#else
# define osc_lock_has_pages(olck) (0)
#endif /* INVARIANT_CHECK */
static void osc_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct osc_lock *olck;

        olck = cl2osc_lock(slice);
        LINVRNT(osc_lock_invariant(olck));
        LINVRNT(!osc_lock_has_pages(olck));

        if (olck->ols_hold)
                osc_lock_unuse(env, slice);
        osc_lock_detach(env, olck);
}
/**
 * Implements cl_lock_operations::clo_state() method for osc layer.
 *
 * Maintains osc_lock::ols_owner field.
 *
 * This assumes that lock always enters CLS_HELD (from some other state) in
 * the same IO context as one that requested the lock. This should not be a
 * problem, because context is by definition shared by all activity pertaining
 * to the same high-level IO.
 */
static void osc_lock_state(const struct lu_env *env,
                           const struct cl_lock_slice *slice,
                           enum cl_lock_state state)
{
        struct osc_lock *lock = cl2osc_lock(slice);
        struct osc_io   *oio  = osc_env_io(env);

        /*
         * XXX multiple io contexts can use the lock at the same time.
         */
        LINVRNT(osc_lock_invariant(lock));
        if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
                LASSERT(lock->ols_owner == NULL);
                lock->ols_owner = oio;
        } else if (state != CLS_HELD)
                lock->ols_owner = NULL;
}
static int osc_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        /*
         * XXX print ldlm lock and einfo properly.
         */
        (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
             lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
             lock->ols_state, lock->ols_owner);
        osc_lvb_print(env, cookie, p, &lock->ols_lvb);
        return 0;
}
static const struct cl_lock_operations osc_lock_ops = {
        .clo_fini    = osc_lock_fini,
        .clo_enqueue = osc_lock_enqueue,
        .clo_wait    = osc_lock_wait,
        .clo_unuse   = osc_lock_unuse,
        .clo_use     = osc_lock_use,
        .clo_delete  = osc_lock_delete,
        .clo_state   = osc_lock_state,
        .clo_cancel  = osc_lock_cancel,
        .clo_weigh   = osc_lock_weigh,
        .clo_print   = osc_lock_print
};
static int osc_lock_lockless_enqueue(const struct lu_env *env,
                                     const struct cl_lock_slice *slice,
                                     struct cl_io *_, __u32 enqflags)
{
        struct osc_lock *ols  = cl2osc_lock(slice);
        struct cl_lock  *lock = ols->ols_cl.cls_lock;
        int result;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);
        LASSERT(ols->ols_state == OLS_NEW);

        result = osc_lock_enqueue_wait(env, ols);
        if (result == 0)
                ols->ols_state = OLS_GRANTED;
        return result;
}
static int osc_lock_lockless_unuse(const struct lu_env *env,
                                   const struct cl_lock_slice *slice)
{
        struct osc_lock *ols  = cl2osc_lock(slice);
        struct cl_lock  *lock = slice->cls_lock;

        LASSERT(ols->ols_state == OLS_GRANTED);
        LINVRNT(osc_lock_invariant(ols));

        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        return 0;
}
static void osc_lock_lockless_cancel(const struct lu_env *env,
                                     const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        int result;

        result = osc_lock_flush(ols, 0);
        if (result)
                CERROR("Pages for lockless lock %p were not purged(%d)\n",
                       ols, result);
        ols->ols_state = OLS_CANCELLED;
}
static int osc_lock_lockless_wait(const struct lu_env *env,
                                  const struct cl_lock_slice *slice)
{
        struct osc_lock *olck = cl2osc_lock(slice);
        struct cl_lock  *lock = olck->ols_cl.cls_lock;

        LINVRNT(osc_lock_invariant(olck));
        LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);

        return lock->cll_error;
}
static void osc_lock_lockless_state(const struct lu_env *env,
                                    const struct cl_lock_slice *slice,
                                    enum cl_lock_state state)
{
        struct osc_lock *lock = cl2osc_lock(slice);
        struct osc_io   *oio  = osc_env_io(env);

        LINVRNT(osc_lock_invariant(lock));
        if (state == CLS_HELD) {
                LASSERT(lock->ols_owner == NULL);
                lock->ols_owner = oio;
                oio->oi_lockless = 1;
        } else
                lock->ols_owner = NULL;
}
static int osc_lock_lockless_fits_into(const struct lu_env *env,
                                       const struct cl_lock_slice *slice,
                                       const struct cl_lock_descr *need,
                                       const struct cl_io *io)
{
        /* returning 0 keeps a lockless lock private to the IO that
         * created it: it never matches any other request */
        return 0;
}

static const struct cl_lock_operations osc_lock_lockless_ops = {
        .clo_fini      = osc_lock_fini,
        .clo_enqueue   = osc_lock_lockless_enqueue,
        .clo_wait      = osc_lock_lockless_wait,
        .clo_unuse     = osc_lock_lockless_unuse,
        .clo_state     = osc_lock_lockless_state,
        .clo_fits_into = osc_lock_lockless_fits_into,
        .clo_cancel    = osc_lock_lockless_cancel,
        .clo_print     = osc_lock_print
};
int osc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
                  const struct cl_io *io)
{
        struct osc_lock   *clk;
        struct osc_io     *oio = osc_env_io(env);
        struct osc_object *oob = cl2osc(obj);
        int result;

        OBD_SLAB_ALLOC_PTR(clk, osc_lock_kmem);
        if (clk != NULL) {
                const struct cl_lock_operations *ops;
                const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
                struct obd_connect_data *ocd;

                osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
                clk->ols_state = OLS_NEW;

                /*
                 * Check if we need to do lockless IO here.
                 * The following conditions must be satisfied:
                 * - the current IO must be locklessable;
                 * - the stripe is in contention;
                 * - the requested lock is not a glimpse.
                 *
                 * If not, we still propagate the locklessable flag to
                 * osc_lock, and let the ost make the decision.
                 *
                 * Additional policy can be implemented here, e.g., never do
                 * lockless-io for large extents.
                 */
                LASSERT(io->ci_lockreq == CILR_MANDATORY ||
                        io->ci_lockreq == CILR_MAYBE ||
                        io->ci_lockreq == CILR_NEVER);
                ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
                clk->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
                        (io->ci_lockreq == CILR_MAYBE) &&
                        (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
                ops = &osc_lock_ops;
                if (io->ci_lockreq == CILR_NEVER ||
                    /* lockless IO */
                    (clk->ols_locklessable && osc_object_is_contended(oob)) ||
                    /* lockless truncate */
                    (io->ci_type == CIT_TRUNC &&
                     (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
                     osd->od_lockless_truncate)) {
                        ops = &osc_lock_lockless_ops;
                        oio->oi_lockless = 1;
                        clk->ols_locklessable = 1;
                }

                cl_lock_slice_add(lock, &clk->ols_cl, obj, ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}
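
/*
 * osc_lock_init() is the osc instance of cl_object_operations::coo_lock_init;
 * the registration itself is expected to live with the osc object code
 * (osc_object.c), outside of this file.
 */

/** @} osc */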