/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif

/* fid_build_reg_res_name() */
#include <lustre_fid.h>

#include "osc_cl_internal.h"

#define _PAGEREF_MAGIC  (-10000000)
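/*
 * Note: _PAGEREF_MAGIC is used as a large negative bias by
 * osc_dlm_lock_pageref() at the end of this file: it is atomically added to
 * ->ols_pageref, and getting back anything other than the bias itself means
 * that some page still holds a reference on the lock.
 */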
/*****************************************************************************
 *
 * Type conversions.
 *
 */

static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
                                 struct osc_lock *ols, int force);
static int osc_lock_has_pages(struct osc_lock *olck);
int osc_lock_is_lockless(const struct osc_lock *olck)
{
        return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
}

/**
 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
 * pointer cannot be dereferenced, as lock is not protected from concurrent
 * reclaim. This function is a helper for osc_lock_invariant().
 */
static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(handle);
        if (lock != NULL)
                LDLM_LOCK_PUT(lock);
        return lock;
}
/**
 * Invariant that has to be true all of the time.
 */
static int osc_lock_invariant(struct osc_lock *ols)
{
        struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
        struct ldlm_lock *olock       = ols->ols_lock;
        int               handle_used = lustre_handle_is_used(&ols->ols_handle);

        return
                ergo(osc_lock_is_lockless(ols),
                     ols->ols_locklessable && ols->ols_lock == NULL) ||
                (ergo(olock != NULL, handle_used) &&
                 ergo(olock != NULL,
                      olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
                 /*
                  * Check that ->ols_handle and ->ols_lock are consistent, but
                  * take into account that they are set at different times.
                  */
                 ergo(handle_used,
                      ergo(lock != NULL && olock != NULL, lock == olock) &&
                      ergo(lock == NULL, olock == NULL)) &&
                 ergo(ols->ols_state == OLS_CANCELLED,
                      olock == NULL && !handle_used) &&
                 /*
                  * DLM lock is destroyed only after we have seen cancellation
                  * ast.
                  */
                 ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
                      !olock->l_destroyed) &&
                 ergo(ols->ols_state == OLS_GRANTED,
                      olock != NULL &&
                      olock->l_req_mode == olock->l_granted_mode &&
                      ols->ols_hold));
}
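/*
 * For readers of the invariant above: ergo(a, b) and equi(a, b) are the
 * libcfs helpers for logical implication and equivalence, essentially
 * (!(a) || (b)) and (!!(a) == !!(b)) respectively.
 */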
/*****************************************************************************
 *
 * Lock operations.
 *
 */

/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        cfs_spin_lock(&osc_ast_guard);
        dlmlock = olck->ols_lock;
        if (dlmlock == NULL) {
                cfs_spin_unlock(&osc_ast_guard);
                return;
        }

        olck->ols_lock = NULL;
        /* wb(); --- for all who check (ols->ols_lock != NULL) before
         * calling osc_lock_detach() */
        dlmlock->l_ast_data = NULL;
        olck->ols_handle.cookie = 0ULL;
        cfs_spin_unlock(&osc_ast_guard);

        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                struct cl_object *obj = olck->ols_cl.cls_obj;
                struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
                __u64 old_kms;

                cl_object_attr_lock(obj);
                /* Must get the value under the lock to avoid possible races. */
                old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
                /* Update the kms. Need to loop over all granted locks.
                 * Not a problem for the client */
                attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

                cl_object_attr_set(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
        }
        unlock_res_and_lock(dlmlock);

        /* release a reference taken in osc_lock_upcall0(). */
        LASSERT(olck->ols_has_ref);
        lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
        LDLM_LOCK_RELEASE(dlmlock);
        olck->ols_has_ref = 0;
}
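/*
 * Summary (editor's note, derived from the code above): osc_lock_detach()
 * severs both directions of the osc_lock <-> ldlm_lock binding under
 * osc_ast_guard, so a subsequent osc_ast_data_get() on the dlm lock returns
 * NULL; osc_dlm_blocking_ast0() relies on exactly this to detect that all
 * osc references on the dlm lock were released.
 */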
static int osc_lock_unhold(struct osc_lock *ols)
{
        int result = 0;

        if (ols->ols_hold) {
                ols->ols_hold = 0;
                result = osc_cancel_base(&ols->ols_handle,
                                         ols->ols_einfo.ei_mode);
        }
        return result;
}
static int osc_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);

        LASSERT(ols->ols_state == OLS_GRANTED ||
                ols->ols_state == OLS_UPCALL_RECEIVED);
        LINVRNT(osc_lock_invariant(ols));

        if (ols->ols_glimpse) {
                LASSERT(ols->ols_hold == 0);
                return 0;
        }
        LASSERT(ols->ols_hold);

        /*
         * Move lock into OLS_RELEASED state before calling osc_cancel_base()
         * so that possible synchronous cancellation (that always happens
         * e.g., for liblustre) sees that lock is released.
         */
        ols->ols_state = OLS_RELEASED;
        return osc_lock_unhold(ols);
}
static void osc_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);

        LINVRNT(osc_lock_invariant(ols));
        /*
         * ->ols_hold can still be true at this point if, for example, a
         * thread that requested a lock was killed (and released a reference
         * to the lock) before the reply from the server was received. In this
         * case lock is destroyed immediately after upcall.
         */
        osc_lock_unhold(ols);
        LASSERT(ols->ols_lock == NULL);
        LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
                cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);

        OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
                        struct ldlm_res_id *resname)
{
        const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);

        /*
         * In the perfect world of the future, where ost servers talk
         * idif fids, this would be all that is needed:
         */
        fid_build_reg_res_name(fid, resname);
        /*
         * In reality, the ost server expects ->lsm_object_id and
         * ->lsm_object_seq in resname.
         */
        osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
                           resname);
}
static void osc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
                                  ldlm_policy_data_t *policy)
{
        const struct cl_lock_descr *d = &lock->cll_descr;

        osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
        policy->l_extent.gid = d->cld_gid;
}
static int osc_enq2ldlm_flags(__u32 enqflags)
{
        int result = 0;

        LASSERT((enqflags & ~CEF_MASK) == 0);

        if (enqflags & CEF_NONBLOCK)
                result |= LDLM_FL_BLOCK_NOWAIT;
        if (enqflags & CEF_ASYNC)
                result |= LDLM_FL_HAS_INTENT;
        if (enqflags & CEF_DISCARD_DATA)
                result |= LDLM_AST_DISCARD_DATA;
        return result;
}
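/*
 * For illustration (hypothetical caller, not from this file): an enqueue
 * with CEF_NONBLOCK | CEF_ASYNC would be translated by osc_enq2ldlm_flags()
 * into LDLM_FL_BLOCK_NOWAIT | LDLM_FL_HAS_INTENT.
 */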
/**
 * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
 * pointers. Initialized in osc_init().
 */
cfs_spinlock_t osc_ast_guard;

static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
{
        struct osc_lock *olck;

        lock_res_and_lock(dlm_lock);
        cfs_spin_lock(&osc_ast_guard);
        olck = dlm_lock->l_ast_data;
        if (olck != NULL) {
                struct cl_lock *lock = olck->ols_cl.cls_lock;
                /*
                 * If osc_lock holds a reference on ldlm lock, return it even
                 * when cl_lock is in CLS_FREEING state. This way
                 *
                 *         osc_ast_data_get(dlmlock) == NULL
                 *
                 * guarantees that all osc references on dlmlock were
                 * released. osc_dlm_blocking_ast0() relies on that.
                 */
                if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
                        cl_lock_get_trust(lock);
                        lu_ref_add_atomic(&lock->cll_reference,
                                          "ast", cfs_current());
                } else
                        olck = NULL;
        }
        cfs_spin_unlock(&osc_ast_guard);
        unlock_res_and_lock(dlm_lock);
        return olck;
}
static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
{
        struct cl_lock *lock;

        lock = olck->ols_cl.cls_lock;
        lu_ref_del(&lock->cll_reference, "ast", cfs_current());
        cl_lock_put(env, lock);
}
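/*
 * Typical usage of the two helpers above (sketch, mirroring the ast handlers
 * later in this file):
 *
 *        olck = osc_ast_data_get(dlmlock);
 *        if (olck != NULL) {
 *                cl_lock_mutex_get(env, olck->ols_cl.cls_lock);
 *                ... inspect or update the osc_lock ...
 *                cl_lock_mutex_put(env, olck->ols_cl.cls_lock);
 *                osc_ast_data_put(env, olck);
 *        }
 */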
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
                                int rc)
{
        struct ost_lvb *lvb;
        struct cl_object *obj;
        struct lov_oinfo *oinfo;
        struct cl_attr *attr;
        unsigned valid;

        if (!(olck->ols_flags & LDLM_FL_LVB_READY))
                return;

        lvb   = &olck->ols_lvb;
        obj   = olck->ols_cl.cls_obj;
        oinfo = cl2osc(obj)->oo_oinfo;
        attr  = &osc_env_info(env)->oti_attr;
        valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
        cl_lvb2attr(attr, lvb);

        cl_object_attr_lock(obj);
        if (rc == 0) {
                struct ldlm_lock *dlmlock;
                __u64 size;

                dlmlock = olck->ols_lock;
                LASSERT(dlmlock != NULL);

                /* re-grab LVB from a dlm lock under DLM spin-locks. */
                *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
                size = lvb->lvb_size;
                /* Extend KMS up to the end of this lock and no further.
                 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
                if (size > dlmlock->l_policy_data.l_extent.end)
                        size = dlmlock->l_policy_data.l_extent.end + 1;
                if (size >= oinfo->loi_kms) {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
                                   ", kms="LPU64, lvb->lvb_size, size);
                        valid |= CAT_KMS;
                        attr->cat_kms = size;
                } else {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
                                   LPU64"; leaving kms="LPU64", end="LPU64,
                                   lvb->lvb_size, oinfo->loi_kms,
                                   dlmlock->l_policy_data.l_extent.end);
                }
                ldlm_lock_allow_match_locked(dlmlock);
        } else if (rc == -ENAVAIL && olck->ols_glimpse) {
                CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
                       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
        } else
                valid = 0;

        if (valid != 0)
                cl_object_attr_set(env, obj, attr, valid);

        cl_object_attr_unlock(obj);
}
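/*
 * Worked example of the KMS rule above: a lock on the extent [0, 4095]
 * covers 4096 bytes, so the known minimum size may be raised to at most
 * l_extent.end + 1 == 4096, even if the server reported a larger lvb_size.
 */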
/**
 * Called when a lock is granted, from an upcall (when server returned a
 * granted lock), or from completion AST, when server returned a blocked lock.
 *
 * Called under lock and resource spin-locks, that are released temporarily
 * here.
 */
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
                             struct ldlm_lock *dlmlock, int rc)
{
        struct ldlm_extent *ext;
        struct cl_lock *lock;
        struct cl_lock_descr *descr;

        LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

        if (olck->ols_state < OLS_GRANTED) {
                lock  = olck->ols_cl.cls_lock;
                ext   = &dlmlock->l_policy_data.l_extent;
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_obj = lock->cll_descr.cld_obj;

                /* XXX check that ->l_granted_mode is valid. */
                descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, ext->start);
                descr->cld_end   = cl_index(descr->cld_obj, ext->end);
                descr->cld_gid   = ext->gid;
                /*
                 * tell upper layers the extent of the lock that was actually
                 * granted.
                 */
                olck->ols_state = OLS_GRANTED;
                osc_lock_lvb_update(env, olck, rc);

                /* release DLM spin-locks to allow cl_lock_{modify,signal}()
                 * to take a semaphore on a parent lock. This is safe, because
                 * spin-locks are needed to protect consistency of
                 * dlmlock->l_*_mode and LVB, and we have finished processing
                 * them. */
                unlock_res_and_lock(dlmlock);
                cl_lock_modify(env, lock, descr);
                cl_lock_signal(env, lock);
                LINVRNT(osc_lock_invariant(olck));
                lock_res_and_lock(dlmlock);
        }
}
static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
        LASSERT(dlmlock != NULL);

        lock_res_and_lock(dlmlock);
        cfs_spin_lock(&osc_ast_guard);
        LASSERT(dlmlock->l_ast_data == olck);
        LASSERT(olck->ols_lock == NULL);
        olck->ols_lock = dlmlock;
        cfs_spin_unlock(&osc_ast_guard);

        /*
         * Lock might be not yet granted. In this case, completion ast
         * (osc_ldlm_completion_ast()) comes later and finishes lock
         * granting.
         */
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
                osc_lock_granted(env, olck, dlmlock, 0);
        unlock_res_and_lock(dlmlock);

        /*
         * osc_enqueue_interpret() decrefs asynchronous locks, counter
         * this.
         */
        ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
        olck->ols_hold = 1;

        /* lock reference taken by ldlm_handle2lock_long() is owned by
         * osc_lock and released in osc_lock_detach() */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
        olck->ols_has_ref = 1;
}
/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, int errcode)
{
        struct osc_lock *olck = cookie;
        struct cl_lock_slice *slice = &olck->ols_cl;
        struct cl_lock *lock = slice->cls_lock;
        struct lu_env *env;
        struct cl_env_nest nest;

        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                int rc;

                cl_lock_mutex_get(env, lock);

                LASSERT(lock->cll_state >= CLS_QUEUING);
                if (olck->ols_state == OLS_ENQUEUED) {
                        olck->ols_state = OLS_UPCALL_RECEIVED;
                        rc = ldlm_error2errno(errcode);
                } else if (olck->ols_state == OLS_CANCELLED) {
                        rc = -EIO;
                } else {
                        CERROR("Impossible state: %d\n", olck->ols_state);
                        LBUG();
                }
                if (rc) {
                        struct ldlm_lock *dlmlock;

                        dlmlock = ldlm_handle2lock(&olck->ols_handle);
                        if (dlmlock != NULL) {
                                lock_res_and_lock(dlmlock);
                                cfs_spin_lock(&osc_ast_guard);
                                LASSERT(olck->ols_lock == NULL);
                                dlmlock->l_ast_data = NULL;
                                olck->ols_handle.cookie = 0ULL;
                                cfs_spin_unlock(&osc_ast_guard);
                                unlock_res_and_lock(dlmlock);
                                LDLM_LOCK_PUT(dlmlock);
                        }
                } else {
                        if (olck->ols_glimpse)
                                olck->ols_glimpse = 0;
                        osc_lock_upcall0(env, olck);
                }

                /* Error handling, some errors are tolerable. */
                if (olck->ols_locklessable && rc == -EUSERS) {
                        /* This is a tolerable error, turn this lock into
                         * lockless lock.
                         */
                        osc_object_set_contended(cl2osc(slice->cls_obj));
                        LASSERT(slice->cls_ops == &osc_lock_ops);

                        /* Change this lock to ldlmlock-less lock. */
                        osc_lock_to_lockless(env, olck, 1);
                        olck->ols_state = OLS_GRANTED;
                        rc = 0;
                } else if (olck->ols_glimpse && rc == -ENAVAIL) {
                        osc_lock_lvb_update(env, olck, rc);
                        cl_lock_delete(env, lock);
                        /* Hide the error. */
                        rc = 0;
                }

                if (rc == 0)
                        /* on error, lock was signaled by cl_lock_error() */
                        cl_lock_signal(env, lock);
                else
                        cl_lock_error(env, lock, rc);

                cl_lock_mutex_put(env, lock);

                /* release cookie reference, acquired by osc_lock_enqueue() */
                lu_ref_del(&lock->cll_reference, "upcall", lock);
                cl_lock_put(env, lock);

                cl_env_nested_put(&nest, env);
        } else
                /* should never happen, similar to osc_ldlm_blocking_ast(). */
                LBUG();
        return errcode;
}
/**
 * Core of osc_dlm_blocking_ast() logic.
 */
static void osc_lock_blocking(const struct lu_env *env,
                              struct ldlm_lock *dlmlock,
                              struct osc_lock *olck, int blocking)
{
        struct cl_lock *lock = olck->ols_cl.cls_lock;

        LASSERT(olck->ols_lock == dlmlock);
        CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
        LASSERT(!osc_lock_is_lockless(olck));

        /*
         * Lock might be still addref-ed here, if e.g., blocking ast
         * is sent for a failed lock.
         */
        osc_lock_unhold(olck);

        if (blocking && olck->ols_state < OLS_BLOCKED)
                /*
                 * Move osc_lock into OLS_BLOCKED before canceling the lock,
                 * because it recursively re-enters osc_lock_blocking(), with
                 * the state set to OLS_CANCELLED.
                 */
                olck->ols_state = OLS_BLOCKED;
        /*
         * cancel and destroy lock at least once no matter how blocking ast is
         * entered (see comment above osc_ldlm_blocking_ast() for use
         * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
         */
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
}
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
                                 struct ldlm_lock *dlmlock,
                                 void *data, int flag)
{
        struct osc_lock *olck;
        struct cl_lock *lock;
        int result;
        int cancel;

        LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);

        cancel = 0;
        olck = osc_ast_data_get(dlmlock);
        if (olck != NULL) {
                lock = olck->ols_cl.cls_lock;
                cl_lock_mutex_get(env, lock);
                LINVRNT(osc_lock_invariant(olck));
                if (olck->ols_ast_wait) {
                        /* wake up osc_lock_use() */
                        cl_lock_signal(env, lock);
                        olck->ols_ast_wait = 0;
                }
                /*
                 * Lock might have been canceled while this thread was
                 * sleeping for lock mutex, but olck is pinned in memory.
                 */
                if (olck == dlmlock->l_ast_data) {
                        /*
                         * NOTE: DLM sends blocking AST's for failed locks
                         *       (that are still in pre-OLS_GRANTED state)
                         *       too, and they have to be canceled otherwise
                         *       DLM lock is never destroyed and stuck in
                         *       the cache.
                         *
                         *       Alternatively, ldlm_cli_cancel() can be
                         *       called here directly for osc_locks with
                         *       ols_state < OLS_GRANTED to maintain an
                         *       invariant that ->clo_cancel() is only called
                         *       for locks that were granted.
                         */
                        LASSERT(data == olck);
                        osc_lock_blocking(env, dlmlock,
                                          olck, flag == LDLM_CB_BLOCKING);
                } else
                        cancel = 1;
                cl_lock_mutex_put(env, lock);
                osc_ast_data_put(env, olck);
        } else
                /*
                 * DLM lock exists, but there is no cl_lock attached to it.
                 * This is a `normal' race. cl_object and its cl_lock's can be
                 * removed by memory pressure, together with all pages.
                 */
                cancel = (flag == LDLM_CB_BLOCKING);

        if (cancel) {
                struct lustre_handle *lockh;

                lockh = &osc_env_info(env)->oti_handle;
                ldlm_lock2handle(dlmlock, lockh);
                result = ldlm_cli_cancel(lockh);
        } else
                result = 0;
        return result;
}
/**
 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
 * some other lock, or is canceled. This function is installed as a
 * ldlm_lock::l_blocking_ast() for client extent locks.
 *
 * Control flow is tricky, because ldlm uses the same call-back
 * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
 *
 * \param dlmlock lock for which ast occurred.
 *
 * \param new description of a conflicting lock in case of blocking ast.
 *
 * \param data value of dlmlock->l_ast_data
 *
 * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *             cancellation and blocking ast's.
 *
 * Possible use cases:
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
 *       a lock due to lock lru pressure, or explicit user request to purge
 *       the lock;
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
 *       us that dlmlock conflicts with another lock that some client is
 *       enqueuing. Lock is canceled.
 *
 *     - cl_lock_cancel() is called. osc_lock_cancel() calls
 *       ldlm_cli_cancel() that calls
 *
 *           dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 *
 *       recursively entering osc_ldlm_blocking_ast().
 *
 *     - client cancels lock voluntarily (e.g., as a part of early
 *       cancellation):
 *
 *           cl_lock_cancel()->
 *               osc_lock_cancel()->
 *                   ldlm_cli_cancel()->
 *                       dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 */
static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                                 struct ldlm_lock_desc *new, void *data,
                                 int flag)
{
        struct lu_env *env;
        struct cl_env_nest nest;
        int result;

        /*
         * This can be called in the context of outer IO, e.g.,
         *
         *     cl_enqueue()->...
         *         ->osc_enqueue_base()->...
         *             ->ldlm_prep_elc_req()->...
         *                 ->ldlm_cancel_callback()->...
         *                     ->osc_ldlm_blocking_ast()
         *
         * new environment has to be created to not corrupt outer context.
         */
        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
                cl_env_nested_put(&nest, env);
        } else {
                result = PTR_ERR(env);
                /*
                 * XXX This should never happen, as cl_lock is
                 * stuck. Pre-allocated environment a la vvp_inode_fini_env
                 * should be used.
                 */
                LBUG();
        }
        if (result != 0) {
                if (result == -ENODATA)
                        result = 0;
                else
                        CERROR("BAST failed: %d\n", result);
        }
        return result;
}
static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                                   int flags, void *data)
{
        struct cl_env_nest nest;
        struct lu_env *env;
        struct osc_lock *olck;
        struct cl_lock *lock;
        int result;
        int dlmrc;

        /* first, do dlm part of the work */
        dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
        /* then, notify cl_lock */
        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                olck = osc_ast_data_get(dlmlock);
                if (olck != NULL) {
                        lock = olck->ols_cl.cls_lock;
                        cl_lock_mutex_get(env, lock);
                        /*
                         * ldlm_handle_cp_callback() copied LVB from request
                         * to lock->l_lvb_data, store it in osc_lock.
                         */
                        LASSERT(dlmlock->l_lvb_data != NULL);
                        lock_res_and_lock(dlmlock);
                        olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
                        if (olck->ols_lock == NULL) {
                                /*
                                 * upcall (osc_lock_upcall()) hasn't yet been
                                 * called. Do nothing now, upcall will bind
                                 * olck to dlmlock and signal the waiters.
                                 *
                                 * This maintains an invariant that osc_lock
                                 * and ldlm_lock are always bound when
                                 * osc_lock is in OLS_GRANTED state.
                                 */
                        } else if (dlmlock->l_granted_mode ==
                                   dlmlock->l_req_mode) {
                                osc_lock_granted(env, olck, dlmlock, dlmrc);
                        }
                        unlock_res_and_lock(dlmlock);

                        if (dlmrc != 0) {
                                CL_LOCK_DEBUG(D_ERROR, env, lock,
                                              "dlmlock returned %d\n", dlmrc);
                                cl_lock_error(env, lock, dlmrc);
                        }
                        cl_lock_mutex_put(env, lock);
                        osc_ast_data_put(env, olck);
                        result = 0;
                } else
                        result = -ELDLM_NO_LOCK_DATA;
                cl_env_nested_put(&nest, env);
        } else
                result = PTR_ERR(env);
        return dlmrc ?: result;
}
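/*
 * Note: ldlm_completion_ast_async() is called first so that the DLM side
 * (->l_granted_mode and the LVB) is settled before cl_lock is notified;
 * "dlmrc ?: result" then gives a DLM error precedence over a local one.
 */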
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
        struct ptlrpc_request *req = data;
        struct osc_lock *olck;
        struct cl_lock *lock;
        struct cl_object *obj;
        struct cl_env_nest nest;
        struct lu_env *env;
        struct ost_lvb *lvb;
        struct req_capsule *cap;
        int result;

        LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);

        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                /*
                 * osc_ast_data_get() has to go after environment is
                 * allocated, because osc_ast_data() acquires a
                 * reference to a lock, and it can only be released in
                 * environment.
                 */
                olck = osc_ast_data_get(dlmlock);
                if (olck != NULL) {
                        lock = olck->ols_cl.cls_lock;
                        cl_lock_mutex_get(env, lock);
                        cap = &req->rq_pill;
                        req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
                        req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
                                             sizeof(*lvb));
                        result = req_capsule_server_pack(cap);
                        if (result == 0) {
                                lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
                                obj = lock->cll_descr.cld_obj;
                                result = cl_object_glimpse(env, obj, lvb);
                        }
                        cl_lock_mutex_put(env, lock);
                        osc_ast_data_put(env, olck);
                } else {
                        /*
                         * These errors are normal races, so we don't want to
                         * fill the console with messages by calling
                         * ptlrpc_error()
                         */
                        lustre_pack_reply(req, 1, NULL, NULL);
                        result = -ELDLM_NO_LOCK_DATA;
                }
                cl_env_nested_put(&nest, env);
        } else
                result = PTR_ERR(env);
        req->rq_status = result;
        return result;
}
static unsigned long osc_lock_weigh(const struct lu_env *env,
                                    const struct cl_lock_slice *slice)
{
        /*
         * don't need to grab coh_page_guard since we don't care the exact
         * number of pages.
         */
        return cl_object_header(slice->cls_obj)->coh_pages;
}
/**
 * Get the weight of dlm lock for early cancellation.
 *
 * XXX: it should return the pages covered by this \a dlmlock.
 */
static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
        struct cl_env_nest nest;
        struct lu_env *env;
        struct osc_lock *lock;
        struct cl_lock *cll;
        unsigned long weight;
        ENTRY;

        /*
         * osc_ldlm_weigh_ast has a complex context since it might be called
         * because of lock canceling, or from user's input. We have to make
         * a new environment for it. Probably it is implementation-safe to use
         * the upper context because cl_lock_put() doesn't modify environment
         * variables. But just in case ..
         */
        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                /* Mostly because of lack of memory, tend to eliminate this lock */
                RETURN(0);

        LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
        lock = osc_ast_data_get(dlmlock);
        if (lock == NULL) {
                /* cl_lock was destroyed because of memory pressure.
                 * It is more reasonable to assign this type of lock
                 * a lower cost.
                 */
                GOTO(out, weight = 0);
        }

        cll = lock->ols_cl.cls_lock;
        cl_lock_mutex_get(env, cll);
        weight = cl_lock_weigh(env, cll);
        cl_lock_mutex_put(env, cll);
        osc_ast_data_put(env, lock);
        EXIT;

out:
        cl_env_nested_put(&nest, env);
        return weight;
}
static void osc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *clock,
                                 struct osc_lock *lock,
                                 struct ldlm_enqueue_info *einfo)
{
        enum cl_lock_mode mode;

        mode = clock->cll_descr.cld_mode;
        if (mode == CLM_PHANTOM)
                /*
                 * For now, enqueue all glimpse locks in read mode. In the
                 * future, client might choose to enqueue LCK_PW lock for
                 * glimpse on a file opened for write.
                 */
                mode = CLM_READ;

        einfo->ei_type   = LDLM_EXTENT;
        einfo->ei_mode   = osc_cl_lock2ldlm(mode);
        einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
        einfo->ei_cb_cp  = osc_ldlm_completion_ast;
        einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
        einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
        einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
}
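/*
 * Note: the ei_cb_* callbacks filled in above are the entry points through
 * which generic ldlm code re-enters this file (blocking, completion and
 * glimpse asts, and the weigh function), with the osc_lock itself travelling
 * in ldlm_lock::l_ast_data via ei_cbdata.
 */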
/**
 * Determine if the lock should be converted into a lockless lock.
 *
 * Steps to check:
 * - if the lock has an explicit requirement for a non-lockless lock;
 * - if the io lock request type ci_lockreq;
 * - send the enqueue rpc to ost to make the further decision;
 * - special treatment for truncate lockless lock.
 *
 * Additional policy can be implemented here, e.g., never do lockless-io
 * for large extents.
 */
static void osc_lock_to_lockless(const struct lu_env *env,
                                 struct osc_lock *ols, int force)
{
        struct cl_lock_slice *slice = &ols->ols_cl;
        struct cl_lock *lock = slice->cls_lock;

        LASSERT(ols->ols_state == OLS_NEW ||
                ols->ols_state == OLS_UPCALL_RECEIVED);

        if (force) {
                ols->ols_locklessable = 1;
                LASSERT(cl_lock_is_mutexed(lock));
                slice->cls_ops = &osc_lock_lockless_ops;
        } else {
                struct osc_io *oio = osc_env_io(env);
                struct cl_io *io = oio->oi_cl.cis_io;
                struct cl_object *obj = slice->cls_obj;
                struct osc_object *oob = cl2osc(obj);
                const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
                struct obd_connect_data *ocd;

                LASSERT(io->ci_lockreq == CILR_MANDATORY ||
                        io->ci_lockreq == CILR_MAYBE ||
                        io->ci_lockreq == CILR_NEVER);

                ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
                ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
                                        (io->ci_lockreq == CILR_MAYBE) &&
                                        (ocd->ocd_connect_flags &
                                         OBD_CONNECT_SRVLOCK);
                if (io->ci_lockreq == CILR_NEVER ||
                    /* lockless IO */
                    (ols->ols_locklessable && osc_object_is_contended(oob)) ||
                    /* lockless truncate */
                    (cl_io_is_trunc(io) &&
                     (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
                     osd->od_lockless_truncate)) {
                        ols->ols_locklessable = 1;
                        slice->cls_ops = &osc_lock_lockless_ops;
                }
        }
        LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
static int osc_lock_compatible(const struct osc_lock *qing,
                               const struct osc_lock *qed)
{
        enum cl_lock_mode qing_mode;
        enum cl_lock_mode qed_mode;

        qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
        if (qed->ols_glimpse &&
            (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
                return 1;

        qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
        return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
}
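/*
 * Example (derived from the predicate above): two CLM_READ locks are always
 * compatible, so enqueuing a read lock does not cancel a cached read lock;
 * anything else conflicts, unless the already-enqueued lock is a glimpse
 * lock that is past enqueue or is being tested against a read lock.
 */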
/**
 * Cancel all conflicting locks and wait for them to be destroyed.
 *
 * This function is used for two purposes:
 *
 *     - early cancel all conflicting locks before starting IO, and
 *
 *     - guarantee that pages added to the page cache by lockless IO are never
 *       covered by locks other than lockless IO lock, and, hence, are not
 *       visible to other threads.
 */
static int osc_lock_enqueue_wait(const struct lu_env *env,
                                 const struct osc_lock *olck)
{
        struct cl_lock *lock = olck->ols_cl.cls_lock;
        struct cl_lock_descr *descr = &lock->cll_descr;
        struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
        struct cl_lock *scan;
        struct cl_lock *conflict = NULL;
        int lockless = osc_lock_is_lockless(olck);
        int rc = 0;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);

        /* make it enqueue anyway for glimpse lock, because we actually
         * don't need to cancel any conflicting locks. */
        if (olck->ols_glimpse)
                return 0;

        cfs_spin_lock(&hdr->coh_lock_guard);
        cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                struct cl_lock_descr *cld = &scan->cll_descr;
                const struct osc_lock *scan_ols;

                if (scan == lock)
                        break;

                if (scan->cll_state < CLS_QUEUING ||
                    scan->cll_state == CLS_FREEING ||
                    cld->cld_start > descr->cld_end ||
                    cld->cld_end < descr->cld_start)
                        continue;

                /* overlapped and living locks. */

                /* We're not supposed to give up group lock. */
                if (scan->cll_descr.cld_mode == CLM_GROUP) {
                        LASSERT(descr->cld_mode != CLM_GROUP ||
                                descr->cld_gid != scan->cll_descr.cld_gid);
                        continue;
                }

                scan_ols = osc_lock_at(scan);

                /* We need to cancel the compatible locks if we're enqueuing
                 * a lockless lock, for example:
                 * imagine that client has PR lock on [0, 1000], and thread T0
                 * is doing lockless IO in [500, 1500] region. Concurrent
                 * thread T1 can see lockless data in [500, 1000], which is
                 * wrong, because these data are possibly stale. */
                if (!lockless && osc_lock_compatible(olck, scan_ols))
                        continue;

                /* Now @scan is conflicting with @lock, this means the current
                 * thread has to sleep until @scan is destroyed. */
                if (scan_ols->ols_owner == osc_env_io(env)) {
                        CERROR("DEADLOCK POSSIBLE!\n");
                        CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
                        CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
                        libcfs_debug_dumpstack(NULL);
                }
                cl_lock_get_trust(scan);
                conflict = scan;
                break;
        }
        cfs_spin_unlock(&hdr->coh_lock_guard);

        if (conflict) {
                if (lock->cll_descr.cld_mode == CLM_GROUP) {
                        /* we want a group lock but a previous lock request
                         * conflicts, we do not wait but return 0 so the
                         * request is sent to the server
                         */
                        CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
                                           "with %p, no wait, send to server\n",
                               lock, conflict);
                        cl_lock_put(env, conflict);
                } else {
                        CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
                                           "will wait\n",
                               lock, conflict);
                        LASSERT(lock->cll_conflict == NULL);
                        lu_ref_add(&conflict->cll_reference, "cancel-wait",
                                   lock);
                        lock->cll_conflict = conflict;
                        rc = CLO_WAIT;
                }
        }
        return rc;
}
/**
 * Implementation of cl_lock_operations::clo_enqueue() method for osc
 * layer. This initiates ldlm enqueue:
 *
 *     - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 *     - calls osc_enqueue_base() to do actual enqueue.
 *
 * osc_enqueue_base() is supplied with an upcall function that is executed
 * when lock is received either after a local cached ldlm lock is matched, or
 * when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
static int osc_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *unused, __u32 enqflags)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        struct cl_lock *lock = ols->ols_cl.cls_lock;
        int result;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);
        LASSERT(ols->ols_state == OLS_NEW);

        ols->ols_flags = osc_enq2ldlm_flags(enqflags);
        if (ols->ols_flags & LDLM_FL_HAS_INTENT)
                ols->ols_glimpse = 1;
        if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
                /* try to convert this lock to a lockless lock */
                osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));

        result = osc_lock_enqueue_wait(env, ols);
        if (result == 0) {
                if (!osc_lock_is_lockless(ols)) {
                        struct osc_object *obj = cl2osc(slice->cls_obj);
                        struct osc_thread_info *info = osc_env_info(env);
                        struct ldlm_res_id *resname = &info->oti_resname;
                        ldlm_policy_data_t *policy = &info->oti_policy;
                        struct ldlm_enqueue_info *einfo = &ols->ols_einfo;

                        if (ols->ols_locklessable)
                                ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;

                        /* a reference for lock, passed as an upcall cookie */
                        cl_lock_get(lock);
                        lu_ref_add(&lock->cll_reference, "upcall", lock);
                        ols->ols_state = OLS_ENQUEUED;

                        /*
                         * XXX: this is possible blocking point as
                         * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
                         * LDLM_FL_LVB_READY to be set.
                         */
                        osc_lock_build_res(env, obj, resname);
                        osc_lock_build_policy(env, lock, policy);
                        result = osc_enqueue_base(osc_export(obj), resname,
                                                  &ols->ols_flags, policy,
                                                  &ols->ols_lvb,
                                                  obj->oo_oinfo->loi_kms_valid,
                                                  osc_lock_upcall,
                                                  ols, einfo, &ols->ols_handle,
                                                  PTLRPCD_SET, 1);
                        if (result != 0) {
                                lu_ref_del(&lock->cll_reference,
                                           "upcall", lock);
                                cl_lock_put(env, lock);
                        }
                } else {
                        ols->ols_state = OLS_GRANTED;
                        ols->ols_owner = osc_env_io(env);
                }
        }
        LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
        return result;
}
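/*
 * State summary (editor's note, derived from the code above and below): a
 * regular osc_lock moves OLS_NEW -> OLS_ENQUEUED -> OLS_UPCALL_RECEIVED ->
 * OLS_GRANTED, and is torn down via OLS_RELEASED or OLS_BLOCKED ->
 * OLS_CANCELLED, while a lockless lock goes straight from OLS_NEW to
 * OLS_GRANTED.
 */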
static int osc_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct osc_lock *olck = cl2osc_lock(slice);
        struct cl_lock *lock = olck->ols_cl.cls_lock;

        LINVRNT(osc_lock_invariant(olck));
        if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
                return 0;

        LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
                     lock->cll_error == 0, olck->ols_lock != NULL));

        return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
}
/**
 * An implementation of cl_lock_operations::clo_use() method that pins cached
 * lock.
 */
static int osc_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct osc_lock *olck = cl2osc_lock(slice);
        int rc;

        LASSERT(!olck->ols_hold);

        /*
         * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
         * flag is not set. This protects us from a concurrent blocking ast.
         */
        rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
        if (rc == 0) {
                olck->ols_hold = 1;
                olck->ols_state = OLS_GRANTED;
        } else {
                struct cl_lock *lock;

                /*
                 * Lock is being cancelled somewhere within
                 * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
                 * set, but osc_ldlm_blocking_ast() hasn't yet acquired
                 * cl_lock mutex.
                 */
                lock = slice->cls_lock;
                LASSERT(lock->cll_state == CLS_INTRANSIT);
                LASSERT(lock->cll_users > 0);
                /* set a flag for osc_dlm_blocking_ast0() to signal the
                 * lock. */
                olck->ols_ast_wait = 1;
                rc = CLO_WAIT;
        }
        return rc;
}
static int osc_lock_flush(struct osc_lock *ols, int discard)
{
        struct cl_lock *lock = ols->ols_cl.cls_lock;
        struct cl_env_nest nest;
        struct lu_env *env;
        int result = 0;

        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                result = cl_lock_page_out(env, lock, discard);
                cl_env_nested_put(&nest, env);
        } else
                result = PTR_ERR(env);

        if (result == 0) {
                ols->ols_flush = 1;
                LINVRNT(!osc_lock_has_pages(ols));
        }
        return result;
}
/**
 * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
 * called (as part of cl_lock_cancel()) when lock is canceled either
 * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to the
 * conflict with some other lock somewhere in the cluster. This function does
 * the following:
 *
 *     - invalidates all pages protected by this lock (after sending dirty
 *       ones to the server, as necessary);
 *
 *     - decref's underlying ldlm lock;
 *
 *     - cancels ldlm lock (ldlm_cli_cancel()).
 */
static void osc_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct cl_lock *lock = slice->cls_lock;
        struct osc_lock *olck = cl2osc_lock(slice);
        struct ldlm_lock *dlmlock = olck->ols_lock;
        int result = 0;
        int discard;

        LASSERT(cl_lock_is_mutexed(lock));
        LINVRNT(osc_lock_invariant(olck));

        if (dlmlock != NULL) {
                int do_cancel;

                discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
                result = osc_lock_flush(olck, discard);
                osc_lock_unhold(olck);

                lock_res_and_lock(dlmlock);
                /* Now that we're the only user of dlm read/write reference,
                 * mostly the ->l_readers + ->l_writers should be zero.
                 * However, there is a corner case.
                 * See bug 18829 for details. */
                do_cancel = (dlmlock->l_readers == 0 &&
                             dlmlock->l_writers == 0);
                dlmlock->l_flags |= LDLM_FL_CBPENDING;
                unlock_res_and_lock(dlmlock);
                if (do_cancel)
                        result = ldlm_cli_cancel(&olck->ols_handle);
                if (result < 0)
                        CL_LOCK_DEBUG(D_ERROR, env, lock,
                                      "lock %p cancel failure with error(%d)\n",
                                      lock, result);
        }
        olck->ols_state = OLS_CANCELLED;
        osc_lock_detach(env, olck);
}
void cl_lock_page_list_fixup(const struct lu_env *env,
                             struct cl_io *io, struct cl_lock *lock,
                             struct cl_page_list *queue);
#ifdef INVARIANT_CHECK
/**
 * Returns true iff there are pages under \a olck not protected by other
 * locks.
 */
static int osc_lock_has_pages(struct osc_lock *olck)
{
        struct cl_lock *lock;
        struct cl_lock_descr *descr;
        struct cl_object *obj;
        struct osc_object *oob;
        struct cl_page_list *plist;
        struct cl_page *page;
        struct cl_env_nest nest;
        struct cl_io *io;
        struct lu_env *env;
        int result;

        env = cl_env_nested_get(&nest);

        obj   = olck->ols_cl.cls_obj;
        oob   = cl2osc(obj);
        io    = &oob->oo_debug_io;
        lock  = olck->ols_cl.cls_lock;
        descr = &lock->cll_descr;
        plist = &osc_env_info(env)->oti_plist;
        cl_page_list_init(plist);

        cfs_mutex_lock(&oob->oo_debug_mutex);

        io->ci_obj = cl_object_top(obj);
        cl_io_init(env, io, CIT_MISC, io->ci_obj);
        cl_page_gang_lookup(env, obj, io,
                            descr->cld_start, descr->cld_end, plist);
        cl_lock_page_list_fixup(env, io, lock, plist);
        if (plist->pl_nr > 0) {
                CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
                cl_page_list_for_each(page, plist)
                        CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
        }
        result = plist->pl_nr > 0;
        cl_page_list_disown(env, io, plist);
        cl_page_list_fini(env, plist);
        cl_io_fini(env, io);
        cfs_mutex_unlock(&oob->oo_debug_mutex);
        cl_env_nested_put(&nest, env);
        return result;
}
#else
static int osc_lock_has_pages(struct osc_lock *olck)
{
        return 0;
}
#endif /* INVARIANT_CHECK */
static void osc_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct osc_lock *olck;

        olck = cl2osc_lock(slice);
        if (olck->ols_glimpse) {
                LASSERT(!olck->ols_hold);
                LASSERT(!olck->ols_lock);
                return;
        }

        LINVRNT(osc_lock_invariant(olck));
        LINVRNT(!osc_lock_has_pages(olck));

        osc_lock_unhold(olck);
        osc_lock_detach(env, olck);
}
/**
 * Implements cl_lock_operations::clo_state() method for osc layer.
 *
 * Maintains osc_lock::ols_owner field.
 *
 * This assumes that lock always enters CLS_HELD (from some other state) in
 * the same IO context as one that requested the lock. This should not be a
 * problem, because context is by definition shared by all activity pertaining
 * to the same high-level IO.
 */
static void osc_lock_state(const struct lu_env *env,
                           const struct cl_lock_slice *slice,
                           enum cl_lock_state state)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        /*
         * XXX multiple io contexts can use the lock at the same time.
         */
        LINVRNT(osc_lock_invariant(lock));
        if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
                struct osc_io *oio = osc_env_io(env);

                LASSERT(lock->ols_owner == NULL);
                lock->ols_owner = oio;
        } else if (state != CLS_HELD)
                lock->ols_owner = NULL;
}
static int osc_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        /*
         * XXX print ldlm lock and einfo properly.
         */
        (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
             lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
             lock->ols_state, lock->ols_owner);
        osc_lvb_print(env, cookie, p, &lock->ols_lvb);
        return 0;
}
static int osc_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct osc_lock *ols = cl2osc_lock(slice);

        if (need->cld_enq_flags & CEF_NEVER)
                return 0;

        if (need->cld_mode == CLM_PHANTOM) {
                /*
                 * Note: the QUEUED lock can't be matched here, otherwise
                 * it might cause the deadlocks.
                 * For example:
                 * P1: enqueued read lock, create sublock1
                 * P2: enqueued write lock, create sublock2 (conflicted
                 *     with sublock1).
                 * P1: Grant read lock.
                 * P1: enqueued glimpse lock (with holding sublock1_read),
                 *     matched with sublock2, waiting sublock2 to be granted.
                 *     But sublock2 can not be granted, because P1
                 *     will not release sublock1. Bang!
                 */
                if (ols->ols_state < OLS_GRANTED ||
                    ols->ols_state > OLS_RELEASED)
                        return 0;
        } else if (need->cld_enq_flags & CEF_MUST) {
                /*
                 * If the lock hasn't ever been enqueued, it can't be matched,
                 * because the enqueue process brings in much information
                 * which can be used to determine things such as lockless,
                 * CEF_MUST, etc.
                 */
                if (ols->ols_state < OLS_UPCALL_RECEIVED &&
                    ols->ols_locklessable)
                        return 0;
        }
        return 1;
}
static const struct cl_lock_operations osc_lock_ops = {
        .clo_fini      = osc_lock_fini,
        .clo_enqueue   = osc_lock_enqueue,
        .clo_wait      = osc_lock_wait,
        .clo_unuse     = osc_lock_unuse,
        .clo_use       = osc_lock_use,
        .clo_delete    = osc_lock_delete,
        .clo_state     = osc_lock_state,
        .clo_cancel    = osc_lock_cancel,
        .clo_weigh     = osc_lock_weigh,
        .clo_print     = osc_lock_print,
        .clo_fits_into = osc_lock_fits_into
};
static int osc_lock_lockless_unuse(const struct lu_env *env,
                                   const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        struct cl_lock *lock = slice->cls_lock;

        LASSERT(ols->ols_state == OLS_GRANTED);
        LINVRNT(osc_lock_invariant(ols));

        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        return 0;
}
static void osc_lock_lockless_cancel(const struct lu_env *env,
                                     const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        int result;

        result = osc_lock_flush(ols, 0);
        if (result)
                CERROR("Pages for lockless lock %p were not purged(%d)\n",
                       ols, result);
        ols->ols_state = OLS_CANCELLED;
}
static int osc_lock_lockless_wait(const struct lu_env *env,
                                  const struct cl_lock_slice *slice)
{
        struct osc_lock *olck = cl2osc_lock(slice);
        struct cl_lock *lock = olck->ols_cl.cls_lock;

        LINVRNT(osc_lock_invariant(olck));
        LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);

        return lock->cll_error;
}
static void osc_lock_lockless_state(const struct lu_env *env,
                                    const struct cl_lock_slice *slice,
                                    enum cl_lock_state state)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        LINVRNT(osc_lock_invariant(lock));
        if (state == CLS_HELD) {
                struct osc_io *oio = osc_env_io(env);

                LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
                lock->ols_owner = oio;

                /* set the io to be lockless if this lock is for io's
                 * host object */
                if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
                        oio->oi_lockless = 1;
        }
}
static int osc_lock_lockless_fits_into(const struct lu_env *env,
                                       const struct cl_lock_slice *slice,
                                       const struct cl_lock_descr *need,
                                       const struct cl_io *io)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        if (!(need->cld_enq_flags & CEF_NEVER))
                return 0;

        /* lockless lock should only be used by its owning io. b22147 */
        return (lock->ols_owner == osc_env_io(env));
}
static const struct cl_lock_operations osc_lock_lockless_ops = {
        .clo_fini      = osc_lock_fini,
        .clo_enqueue   = osc_lock_enqueue,
        .clo_wait      = osc_lock_lockless_wait,
        .clo_unuse     = osc_lock_lockless_unuse,
        .clo_state     = osc_lock_lockless_state,
        .clo_fits_into = osc_lock_lockless_fits_into,
        .clo_cancel    = osc_lock_lockless_cancel,
        .clo_print     = osc_lock_print
};
int osc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
                  const struct cl_io *unused)
{
        struct osc_lock *clk;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
        if (clk != NULL) {
                osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
                cfs_atomic_set(&clk->ols_pageref, 0);
                clk->ols_state = OLS_NEW;
                cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}
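/*
 * Note (assumption, based on the cl_object protocol rather than this file):
 * osc_lock_init() is invoked from the cl_lock layer through the osc object's
 * ->coo_lock_init() method when a new cl_lock is being built on top of an
 * osc object.
 */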
int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
{
        struct osc_lock *olock;
        int rc = 0;

        cfs_spin_lock(&osc_ast_guard);
        olock = dlm->l_ast_data;
        /*
         * there's a very rare race with osc_page_addref_lock(), but that
         * doesn't matter because in the worst case we don't cancel a lock
         * which we actually can, that's no harm.
         */
        if (olock != NULL &&
            cfs_atomic_add_return(_PAGEREF_MAGIC,
                                  &olock->ols_pageref) != _PAGEREF_MAGIC) {
                cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
                rc = 1;
        }
        cfs_spin_unlock(&osc_ast_guard);
        return rc;
}