/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>
/* fid_build_reg_res_name() */
#include <lustre_fid.h>

#include "osc_cl_internal.h"

/** \addtogroup osc
 *  @{
 */

/*****************************************************************************
 *
 * Type conversions.
 *
 */

static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
                                 struct osc_lock *ols, int force);

int osc_lock_is_lockless(const struct osc_lock *olck)
{
        return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
}

/**
 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
 * pointer cannot be dereferenced, as lock is not protected from concurrent
 * reclaim. This function is a helper for osc_lock_invariant().
 */
static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(handle);
        if (lock != NULL)
                LDLM_LOCK_PUT(lock);
        return lock;
}

/**
 * Invariant that has to be true all of the time.
 */
static int osc_lock_invariant(struct osc_lock *ols)
{
        struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
        struct ldlm_lock *olock       = ols->ols_dlmlock;
        int               handle_used = lustre_handle_is_used(&ols->ols_handle);

        if (ergo(osc_lock_is_lockless(ols),
                 ols->ols_locklessable && ols->ols_dlmlock == NULL))
                return 1;

        /*
         * If all the following "ergo"s are true, return 1, otherwise 0
         */
        if (! ergo(olock != NULL, handle_used))
                return 0;

        if (! ergo(olock != NULL,
                   olock->l_handle.h_cookie == ols->ols_handle.cookie))
                return 0;

        if (! ergo(handle_used,
                   ergo(lock != NULL && olock != NULL, lock == olock) &&
                   ergo(lock == NULL, olock == NULL)))
                return 0;
        /*
         * Check that ->ols_handle and ->ols_dlmlock are consistent, but
         * take into account that they are set at the different time.
         */
        if (! ergo(ols->ols_state == OLS_CANCELLED,
                   olock == NULL && !handle_used))
                return 0;
        /*
         * DLM lock is destroyed only after we have seen cancellation
         * ast.
         */
        if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
                   !ldlm_is_destroyed(olock)))
                return 0;

        if (! ergo(ols->ols_state == OLS_GRANTED,
                   olock != NULL &&
                   olock->l_req_mode == olock->l_granted_mode &&
                   ols->ols_hold))
                return 0;
        return 1;
}

/*****************************************************************************
 *
 * Lock operations.
 *
 */

static void osc_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);

        LINVRNT(osc_lock_invariant(ols));
        LASSERT(ols->ols_dlmlock == NULL);

        OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}

static void osc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
                                  ldlm_policy_data_t *policy)
{
        const struct cl_lock_descr *d = &lock->cll_descr;

        osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
        policy->l_extent.gid = d->cld_gid;
}

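/**
 * Translates cl_lock enqueue flags (CEF_*) into their LDLM_FL_* counterparts
 * for the DLM enqueue request.
 */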
static __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
        __u64 result = 0;

        LASSERT((enqflags & ~CEF_MASK) == 0);

        if (enqflags & CEF_NONBLOCK)
                result |= LDLM_FL_BLOCK_NOWAIT;
        if (enqflags & CEF_ASYNC)
                result |= LDLM_FL_HAS_INTENT;
        if (enqflags & CEF_DISCARD_DATA)
                result |= LDLM_FL_AST_DISCARD_DATA;
        if (enqflags & CEF_PEEK)
                result |= LDLM_FL_TEST_LOCK;
        return result;
}

/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env,
                                struct osc_object *osc,
                                struct ldlm_lock *dlmlock,
                                struct ost_lvb *lvb)
{
        struct cl_object *obj = osc2cl(osc);
        struct lov_oinfo *oinfo = osc->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned valid;

        ENTRY;

        valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
        if (lvb == NULL) {
                LASSERT(dlmlock != NULL);
                lvb = dlmlock->l_lvb_data;
        }
        cl_lvb2attr(attr, lvb);

        cl_object_attr_lock(obj);
        if (dlmlock != NULL) {
                __u64 size;

                check_res_locked(dlmlock->l_resource);

                LASSERT(lvb == dlmlock->l_lvb_data);
                size = lvb->lvb_size;

                /* Extend KMS up to the end of this lock and no further.
                 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
                if (size > dlmlock->l_policy_data.l_extent.end)
                        size = dlmlock->l_policy_data.l_extent.end + 1;
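                /*
                 * For example, a lock on the byte extent [0, 4095] allows
                 * kms to be raised to at most 4096 (y + 1), no matter how
                 * large lvb_size is.
                 */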
                if (size >= oinfo->loi_kms) {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
                                   ", kms="LPU64, lvb->lvb_size, size);
                        valid |= CAT_KMS;
                        attr->cat_kms = size;
                } else {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
                                   LPU64"; leaving kms="LPU64", end="LPU64,
                                   lvb->lvb_size, oinfo->loi_kms,
                                   dlmlock->l_policy_data.l_extent.end);
                }
                ldlm_lock_allow_match_locked(dlmlock);
        }

        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        EXIT;
}

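/**
 * Records a granted DLM lock in the osc_lock: takes lock and mode references,
 * widens the cl_lock descriptor to the actually granted extent and, if
 * requested, updates object attributes from the lock value block.
 */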
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
                             struct lustre_handle *lockh, bool lvb_update)
{
        struct ldlm_lock *dlmlock;

        dlmlock = ldlm_handle2lock_long(lockh, 0);
        LASSERT(dlmlock != NULL);

        /* lock reference taken by ldlm_handle2lock_long() is
         * owned by osc_lock and released in osc_lock_detach()
         */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
        oscl->ols_has_ref = 1;

        LASSERT(oscl->ols_dlmlock == NULL);
        oscl->ols_dlmlock = dlmlock;

        /* This may be a matched lock for glimpse request, do not hold
         * lock reference in that case. */
        if (!oscl->ols_glimpse) {
                /* hold a reference for a non-glimpse lock which will
                 * be released in osc_lock_cancel() */
                lustre_handle_copy(&oscl->ols_handle, lockh);
                ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
                oscl->ols_hold = 1;
        }

        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

                /* extend the lock extent, otherwise it will have a problem
                 * when we decide whether to grant a lockless lock. */
                descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, ext->start);
                descr->cld_end   = cl_index(descr->cld_obj, ext->end);
                descr->cld_gid   = ext->gid;

                /* no lvb update for matched lock */
                if (lvb_update) {
                        LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                        osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
                                            dlmlock, NULL);
                }
                LINVRNT(osc_lock_invariant(oscl));
        }
        unlock_res_and_lock(dlmlock);

        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
}

/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                           int errcode)
{
        struct osc_lock      *oscl  = cookie;
        struct cl_lock_slice *slice = &oscl->ols_cl;
        struct lu_env        *env;
        struct cl_env_nest    nest;
        int                   rc;

        ENTRY;

        env = cl_env_nested_get(&nest);
        /* should never happen, similar to osc_ldlm_blocking_ast(). */
        LASSERT(!IS_ERR(env));

        rc = ldlm_error2errno(errcode);
        if (oscl->ols_state == OLS_ENQUEUED) {
                oscl->ols_state = OLS_UPCALL_RECEIVED;
        } else if (oscl->ols_state == OLS_CANCELLED) {
                rc = -EIO;
        } else {
                CERROR("Impossible state: %d\n", oscl->ols_state);
                LBUG();
        }

        if (rc == 0)
                osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

        /* Error handling, some errors are tolerable. */
        if (oscl->ols_locklessable && rc == -EUSERS) {
                /* This is a tolerable error, turn this lock into
                 * lockless lock.
                 */
                osc_object_set_contended(cl2osc(slice->cls_obj));
                LASSERT(slice->cls_ops == &osc_lock_ops);

                /* Change this lock to ldlmlock-less lock. */
                osc_lock_to_lockless(env, oscl, 1);
                oscl->ols_state = OLS_GRANTED;
                rc = 0;
        } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
                LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
                                    NULL, &oscl->ols_lvb);
                /* Hide the error. */
                rc = 0;
        }

        if (oscl->ols_owner != NULL)
                cl_sync_io_note(env, oscl->ols_owner, rc);
        cl_env_nested_put(&nest, env);

        RETURN(rc);
}

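/**
 * Lock upcall for speculative (AGL) locks. An AGL lock carries no osc_lock,
 * so the cookie is the osc_object itself; attributes are updated from the
 * LVB and the object reference taken at enqueue time is dropped.
 */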
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
                               int errcode)
{
        struct osc_object  *osc = cookie;
        struct ldlm_lock   *dlmlock;
        struct lu_env      *env;
        struct cl_env_nest  nest;
        ENTRY;

        env = cl_env_nested_get(&nest);
        LASSERT(!IS_ERR(env));

        if (errcode == ELDLM_LOCK_MATCHED)
                GOTO(out, errcode = ELDLM_OK);

        if (errcode != ELDLM_OK)
                GOTO(out, errcode);

        dlmlock = ldlm_handle2lock(lockh);
        LASSERT(dlmlock != NULL);

        lock_res_and_lock(dlmlock);
        LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

        /* there is no osc_lock associated with AGL lock */
        osc_lock_lvb_update(env, osc, dlmlock, NULL);

        unlock_res_and_lock(dlmlock);
        LDLM_LOCK_PUT(dlmlock);

out:
        cl_object_put(env, osc2cl(osc));
        cl_env_nested_put(&nest, env);
        RETURN(ldlm_error2errno(errcode));
}

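/**
 * Flushes pages in the extent [\a start, \a end]: for a write lock, dirty
 * pages are first written back (or discarded), then all covered pages are
 * discarded from the cache.
 */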
static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
                          enum cl_lock_mode mode, int discard)
{
        struct lu_env      *env;
        struct cl_env_nest  nest;
        int rc = 0;
        int rc2 = 0;
        ENTRY;

        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (mode == CLM_WRITE) {
                rc = osc_cache_writeback_range(env, obj, start, end, 1,
                                               discard);
                CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
                       obj, start, end, rc,
                       discard ? "discarded" : "written back");
                if (rc > 0)
                        rc = 0;
        }

        rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
        if (rc == 0 && rc2 < 0)
                rc = rc2;

        cl_env_nested_put(&nest, env);
        RETURN(rc);
}

/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
                                 struct ldlm_lock *dlmlock,
                                 void *data, int flag)
{
        struct cl_object *obj = NULL;
        int result = 0;
        int discard;
        enum cl_lock_mode mode = CLM_READ;
        ENTRY;

        LASSERT(flag == LDLM_CB_CANCELING);

        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
                dlmlock->l_ast_data = NULL;
                unlock_res_and_lock(dlmlock);
                RETURN(0);
        }

        discard = ldlm_is_discard_data(dlmlock);
        if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
                mode = CLM_WRITE;

        if (dlmlock->l_ast_data != NULL) {
                obj = osc2cl(dlmlock->l_ast_data);
                dlmlock->l_ast_data = NULL;

                cl_object_get(obj);
        }

        unlock_res_and_lock(dlmlock);

        /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
         * the object has been destroyed. */
        if (obj != NULL) {
                struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;
                __u64 old_kms;

                /* Destroy pages covered by the extent of the DLM lock */
                result = osc_lock_flush(cl2osc(obj),
                                        cl_index(obj, extent->start),
                                        cl_index(obj, extent->end),
                                        mode, discard);

                /* losing a lock, update kms */
                lock_res_and_lock(dlmlock);
                cl_object_attr_lock(obj);
                /* Must get the value under the lock to avoid race. */
                old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
                /* Update the kms. Need to loop all granted locks.
                 * Not a problem for the client */
                attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

                cl_object_attr_update(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
                unlock_res_and_lock(dlmlock);

                cl_object_put(env, obj);
        }
        RETURN(result);
}

/**
 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
 * some other lock, or is canceled. This function is installed as a
 * ldlm_lock::l_blocking_ast() for client extent locks.
 *
 * Control flow is tricky, because ldlm uses the same call-back
 * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
 *
 * \param dlmlock lock for which ast occurred.
 *
 * \param new description of a conflicting lock in case of blocking ast.
 *
 * \param data value of dlmlock->l_ast_data
 *
 * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *             cancellation and blocking ast's.
 *
 * Possible use cases:
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
 *       lock due to lock lru pressure, or explicit user request to purge
 *       locks.
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
 *       us that dlmlock conflicts with another lock that some client is
 *       enqueuing. Lock is canceled.
 *
 *     - cl_lock_cancel() is called. osc_lock_cancel() calls
 *       ldlm_cli_cancel() that calls
 *
 *            dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 *
 *       recursively entering osc_ldlm_blocking_ast().
 *
 *     - client cancels lock voluntarily (e.g., as a part of early
 *       cancellation):
 *
 *           cl_lock_cancel()->
 *             osc_lock_cancel()->
 *               ldlm_cli_cancel()->
 *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 */
static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                                 struct ldlm_lock_desc *new, void *data,
                                 int flag)
{
        int result = 0;
        ENTRY;

        switch (flag) {
        case LDLM_CB_BLOCKING: {
                struct lustre_handle lockh;

                ldlm_lock2handle(dlmlock, &lockh);
                result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                if (result == -ENODATA)
                        result = 0;
                break;
        }
        case LDLM_CB_CANCELING: {
                struct lu_env      *env;
                struct cl_env_nest  nest;

                /*
                 * This can be called in the context of outer IO, e.g.,
                 *
                 *    osc_enqueue_base()->...
                 *      ->ldlm_prep_elc_req()->...
                 *        ->ldlm_cancel_callback()->...
                 *          ->osc_ldlm_blocking_ast()
                 *
                 * new environment has to be created to not corrupt outer
                 * context.
                 */
                env = cl_env_nested_get(&nest);
                if (IS_ERR(env)) {
                        result = PTR_ERR(env);
                        break;
                }

                result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
                cl_env_nested_put(&nest, env);
                break;
        }
        default:
                LBUG();
        }
        RETURN(result);
}

static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
        struct ptlrpc_request *req = data;
        struct cl_env_nest     nest;
        struct lu_env         *env;
        struct ost_lvb        *lvb;
        struct req_capsule    *cap;
        int                    result;
        ENTRY;

        LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);

        env = cl_env_nested_get(&nest);
        if (!IS_ERR(env)) {
                struct cl_object *obj = NULL;

                lock_res_and_lock(dlmlock);
                if (dlmlock->l_ast_data != NULL) {
                        obj = osc2cl(dlmlock->l_ast_data);
                        cl_object_get(obj);
                }
                unlock_res_and_lock(dlmlock);

                if (obj != NULL) {
                        /* Do not grab the mutex of cl_lock for glimpse.
                         * See LU-1274 for details.
                         * BTW, it's okay for cl_lock to be cancelled during
                         * this period because server can handle this race.
                         * See ldlm_server_glimpse_ast() for details.
                         * cl_lock_mutex_get(env, lock); */
                        cap = &req->rq_pill;
                        req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
                        req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
                                             sizeof *lvb);
                        result = req_capsule_server_pack(cap);
                        if (result == 0) {
                                lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
                                result = cl_object_glimpse(env, obj, lvb);
                        }
                        if (!exp_connect_lvb_type(req->rq_export))
                                req_capsule_shrink(&req->rq_pill,
                                                   &RMF_DLM_LVB,
                                                   sizeof(struct ost_lvb_v1),
                                                   RCL_SERVER);
                        cl_object_put(env, obj);
                } else {
                        /*
                         * These errors are normal races, so we don't want to
                         * fill the console with messages by calling
                         * ptlrpc_error()
                         */
                        lustre_pack_reply(req, 1, NULL, NULL);
                        result = -ELDLM_NO_LOCK_DATA;
                }
                cl_env_nested_put(&nest, env);
        } else {
                result = PTR_ERR(env);
        }
        req->rq_status = result;
        RETURN(result);
}

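/**
 * Page gang-lookup callback for osc_lock_weight(): aborts the scan as soon as
 * a page in active use (vmlocked, dirty or under writeback) is found,
 * otherwise records the next index to scan from.
 */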
static int weigh_cb(const struct lu_env *env, struct cl_io *io,
                    struct osc_page *ops, void *cbdata)
{
        struct cl_page *page = ops->ops_cl.cpl_page;

        if (cl_page_is_vmlocked(env, page) ||
            PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage))
                return CLP_GANG_ABORT;

        *(pgoff_t *)cbdata = osc_index(ops) + 1;
        return CLP_GANG_OKAY;
}

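/**
 * Returns 1 if any page covered by \a extent is still in use, so that the
 * lock is "heavy" and should not be an early cancellation candidate; returns
 * 0 for an idle lock.
 */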
static unsigned long osc_lock_weight(const struct lu_env *env,
                                     struct osc_object *oscobj,
                                     struct ldlm_extent *extent)
{
        struct cl_io     *io  = &osc_env_info(env)->oti_io;
        struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
        pgoff_t           page_index;
        int               result;
        ENTRY;

        io->ci_obj = obj;
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                RETURN(result);

        page_index = cl_index(obj, extent->start);
        do {
                result = osc_page_gang_lookup(env, io, oscobj,
                                              page_index,
                                              cl_index(obj, extent->end),
                                              weigh_cb, (void *)&page_index);
                if (result == CLP_GANG_ABORT)
                        break;
                if (result == CLP_GANG_RESCHED)
                        cond_resched();
        } while (result != CLP_GANG_OKAY);
        cl_io_fini(env, io);

        return result == CLP_GANG_ABORT ? 1 : 0;
}

/**
 * Get the weight of dlm lock for early cancellation.
 */
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
        struct cl_env_nest  nest;
        struct lu_env      *env;
        struct osc_object  *obj;
        struct osc_lock    *oscl;
        unsigned long       weight;
        bool                found = false;
        ENTRY;

        might_sleep();
        /*
         * osc_ldlm_weigh_ast has a complex context since it might be called
         * because of lock canceling, or from user's input. We have to make
         * a new environment for it. Probably it is implementation safe to use
         * the upper context because cl_lock_put don't modify environment
         * variables. But just in case ..
         */
        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                /* Mostly because lack of memory, do not eliminate this lock */
                RETURN(1);

        LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
        obj = dlmlock->l_ast_data;
        if (obj == NULL)
                GOTO(out, weight = 1);

        spin_lock(&obj->oo_ol_spin);
        list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
                if (oscl->ols_dlmlock != NULL && oscl->ols_dlmlock != dlmlock)
                        continue;
                found = true;
                break;
        }
        spin_unlock(&obj->oo_ol_spin);
        if (found) {
                /*
                 * If the lock is being used by an IO, definitely not cancel it.
                 */
                GOTO(out, weight = 1);
        }

        weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
        EXIT;

out:
        cl_env_nested_put(&nest, env);
        return weight;
}

static void osc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *lock,
                                 struct osc_object *osc,
                                 struct ldlm_enqueue_info *einfo)
{
        einfo->ei_type   = LDLM_EXTENT;
        einfo->ei_mode   = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
        einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
        einfo->ei_cb_cp  = ldlm_completion_ast;
        einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
        einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
}

/**
 * Determine if the lock should be converted into a lockless lock.
 *
 * Steps to check:
 * - if the lock has an explicit requirement for a non-lockless lock;
 * - what the io lock request type (ci_lockreq) allows;
 * - send the enqueue rpc to ost to make the further decision;
 * - special treatment of lockless truncate.
 *
 * Additional policy can be implemented here, e.g., never do lockless-io
 * for large extents.
 */
static void osc_lock_to_lockless(const struct lu_env *env,
                                 struct osc_lock *ols, int force)
{
        struct cl_lock_slice *slice = &ols->ols_cl;

        LASSERT(ols->ols_state == OLS_NEW ||
                ols->ols_state == OLS_UPCALL_RECEIVED);

        if (force) {
                ols->ols_locklessable = 1;
                slice->cls_ops = &osc_lock_lockless_ops;
        } else {
                struct osc_io *oio     = osc_env_io(env);
                struct cl_io  *io      = oio->oi_cl.cis_io;
                struct cl_object *obj  = slice->cls_obj;
                struct osc_object *oob = cl2osc(obj);
                const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
                struct obd_connect_data *ocd;

                LASSERT(io->ci_lockreq == CILR_MANDATORY ||
                        io->ci_lockreq == CILR_MAYBE ||
                        io->ci_lockreq == CILR_NEVER);

                ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
                ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
                                        (io->ci_lockreq == CILR_MAYBE) &&
                                        (ocd->ocd_connect_flags &
                                         OBD_CONNECT_SRVLOCK);
                if (io->ci_lockreq == CILR_NEVER ||
                    /* lockless IO */
                    (ols->ols_locklessable && osc_object_is_contended(oob)) ||
                    /* lockless truncate */
                    (cl_io_is_trunc(io) &&
                     (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
                     osd->od_lockless_truncate)) {
                        ols->ols_locklessable = 1;
                        slice->cls_ops = &osc_lock_lockless_ops;
                }
        }
        LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}

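/**
 * Decides whether an enqueuing lock \a qing may coexist with an already
 * enqueued lock \a qed: glimpse locks, read-read pairs, not-yet-granted locks
 * and granted locks whose mode and extent fully cover the new request are
 * all compatible.
 */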
static bool osc_lock_compatible(const struct osc_lock *qing,
                                const struct osc_lock *qed)
{
        struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
        struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;

        if (qed->ols_glimpse)
                return true;

        if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
                return true;

        if (qed->ols_state < OLS_GRANTED)
                return true;

        if (qed_descr->cld_mode  >= qing_descr->cld_mode &&
            qed_descr->cld_start <= qing_descr->cld_start &&
            qed_descr->cld_end   >= qing_descr->cld_end)
                return true;

        return false;
}

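/**
 * Removes \a oscl from its object's lock list and wakes up all locks that are
 * waiting for this one to be cancelled.
 */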
static void osc_lock_wake_waiters(const struct lu_env *env,
                                  struct osc_object *osc,
                                  struct osc_lock *oscl)
{
        spin_lock(&osc->oo_ol_spin);
        list_del_init(&oscl->ols_nextlock_oscobj);
        spin_unlock(&osc->oo_ol_spin);

        spin_lock(&oscl->ols_lock);
        while (!list_empty(&oscl->ols_waiting_list)) {
                struct osc_lock *scan;

                scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
                                  ols_wait_entry);
                list_del_init(&scan->ols_wait_entry);

                cl_sync_io_note(env, scan->ols_owner, 0);
        }
        spin_unlock(&oscl->ols_lock);
}

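/**
 * Scans locks enqueued earlier on the same object and, for every overlapping
 * incompatible lock, waits (via cl_sync_io) until that lock is cancelled
 * before restarting the scan. The scan stops at a group lock, which is never
 * given up.
 */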
static void osc_lock_enqueue_wait(const struct lu_env *env,
                                  struct osc_object *obj,
                                  struct osc_lock *oscl)
{
        struct osc_lock      *tmp_oscl;
        struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
        struct cl_sync_io    *waiter = &osc_env_info(env)->oti_anchor;
        ENTRY;

        spin_lock(&obj->oo_ol_spin);
        list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);

restart:
        list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
                            ols_nextlock_oscobj) {
                struct cl_lock_descr *descr;

                if (tmp_oscl == oscl)
                        break;

                descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
                if (descr->cld_start > need->cld_end ||
                    descr->cld_end   < need->cld_start)
                        continue;

                /* We're not supposed to give up group lock */
                if (descr->cld_mode == CLM_GROUP)
                        break;

                if (!osc_lock_is_lockless(oscl) &&
                    osc_lock_compatible(oscl, tmp_oscl))
                        continue;

                /* wait for conflicting lock to be canceled */
                cl_sync_io_init(waiter, 1, cl_sync_io_end);
                oscl->ols_owner = waiter;

                spin_lock(&tmp_oscl->ols_lock);
                /* add oscl into tmp's ols_waiting list */
                list_add_tail(&oscl->ols_wait_entry,
                              &tmp_oscl->ols_waiting_list);
                spin_unlock(&tmp_oscl->ols_lock);

                spin_unlock(&obj->oo_ol_spin);
                (void)cl_sync_io_wait(env, waiter, 0);

                spin_lock(&obj->oo_ol_spin);
                oscl->ols_owner = NULL;
                goto restart;
        }
        spin_unlock(&obj->oo_ol_spin);
        EXIT;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() method for osc
 * layer. This initiates ldlm enqueue:
 *
 *     - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 *     - calls osc_enqueue_base() to do actual enqueue.
 *
 * osc_enqueue_base() is supplied with an upcall function that is executed
 * when lock is received either after a local cached ldlm lock is matched, or
 * when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
static int osc_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *unused, struct cl_sync_io *anchor)
{
        struct osc_thread_info *info    = osc_env_info(env);
        struct osc_io          *oio     = osc_env_io(env);
        struct osc_object      *osc     = cl2osc(slice->cls_obj);
        struct osc_lock        *oscl    = cl2osc_lock(slice);
        struct cl_lock         *lock    = slice->cls_lock;
        struct ldlm_res_id     *resname = &info->oti_resname;
        ldlm_policy_data_t     *policy  = &info->oti_policy;
        osc_enqueue_upcall_f    upcall  = osc_lock_upcall;
        void                   *cookie  = oscl;
        bool                    async   = false;
        int                     result;

        ENTRY;

        LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
                 "lock = %p, ols = %p\n", lock, oscl);

        if (oscl->ols_state == OLS_GRANTED)
                RETURN(0);

        if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
                GOTO(enqueue_base, 0);

        if (oscl->ols_glimpse) {
                LASSERT(equi(oscl->ols_agl, anchor == NULL));
                async = true;
                GOTO(enqueue_base, 0);
        }

        osc_lock_enqueue_wait(env, osc, oscl);

        /* we can grant lockless lock right after all conflicting locks
         * are canceled. */
        if (osc_lock_is_lockless(oscl)) {
                oscl->ols_state = OLS_GRANTED;
                oio->oi_lockless = 1;
                RETURN(0);
        }

enqueue_base:
        oscl->ols_state = OLS_ENQUEUED;
        if (anchor != NULL) {
                atomic_inc(&anchor->csi_sync_nr);
                oscl->ols_owner = anchor;
        }

        /**
         * DLM lock's ast data must be osc_object;
         * if glimpse or AGL lock, async of osc_enqueue_base() must be true,
         * DLM's enqueue callback set to osc_lock_upcall() with cookie as
         * osc_lock.
         */
        ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
        osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo);
        osc_lock_build_policy(env, lock, policy);
        if (oscl->ols_agl) {
                oscl->ols_einfo.ei_cbdata = NULL;
                /* hold a reference for callback */
                cl_object_get(osc2cl(osc));
                upcall = osc_lock_upcall_agl;
                cookie = osc;
        }
        result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
                                  policy, &oscl->ols_lvb,
                                  osc->oo_oinfo->loi_kms_valid,
                                  upcall, cookie,
                                  &oscl->ols_einfo, PTLRPCD_SET, async,
                                  oscl->ols_agl);
        if (result != 0) {
                oscl->ols_state = OLS_CANCELLED;
                osc_lock_wake_waiters(env, osc, oscl);

                /* hide error for AGL lock. */
                if (oscl->ols_agl) {
                        cl_object_put(env, osc2cl(osc));
                        result = 0;
                }

                if (anchor != NULL)
                        cl_sync_io_note(env, anchor, result);
        } else {
                if (osc_lock_is_lockless(oscl)) {
                        oio->oi_lockless = 1;
                } else if (!async) {
                        LASSERT(oscl->ols_state == OLS_GRANTED);
                        LASSERT(oscl->ols_hold);
                        LASSERT(oscl->ols_dlmlock != NULL);
                }
        }
        RETURN(result);
}

/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        dlmlock = olck->ols_dlmlock;
        if (dlmlock == NULL)
                return;

        if (olck->ols_hold) {
                olck->ols_hold = 0;
                osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
                olck->ols_handle.cookie = 0ULL;
        }

        olck->ols_dlmlock = NULL;

        /* release a reference taken in osc_lock_upcall(). */
        LASSERT(olck->ols_has_ref);
        lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
        LDLM_LOCK_RELEASE(dlmlock);
        olck->ols_has_ref = 0;
}

/**
 * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
 * called (as part of cl_lock_cancel()) when lock is canceled either
 * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
 * conflict with some other lock somewhere in the cluster. This function does
 * the following:
 *
 *     - invalidates all pages protected by this lock (after sending dirty
 *       ones to the server, as necessary);
 *
 *     - decref's underlying ldlm lock;
 *
 *     - cancels ldlm lock (ldlm_cli_cancel()).
 */
static void osc_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct osc_object *obj  = cl2osc(slice->cls_obj);
        struct osc_lock   *oscl = cl2osc_lock(slice);
        ENTRY;

        LINVRNT(osc_lock_invariant(oscl));

        osc_lock_detach(env, oscl);
        oscl->ols_state = OLS_CANCELLED;
        oscl->ols_flags &= ~LDLM_FL_LVB_READY;

        osc_lock_wake_waiters(env, obj, oscl);
        EXIT;
}

static int osc_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        (*p)(env, cookie, "%p "LPX64" "LPX64" %d %p ",
             lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
             lock->ols_state, lock->ols_owner);
        osc_lvb_print(env, cookie, p, &lock->ols_lvb);
        return 0;
}

static const struct cl_lock_operations osc_lock_ops = {
        .clo_fini    = osc_lock_fini,
        .clo_enqueue = osc_lock_enqueue,
        .clo_cancel  = osc_lock_cancel,
        .clo_print   = osc_lock_print,
};

static void osc_lock_lockless_cancel(const struct lu_env *env,
                                     const struct cl_lock_slice *slice)
{
        struct osc_lock      *ols   = cl2osc_lock(slice);
        struct osc_object    *osc   = cl2osc(slice->cls_obj);
        struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
        int result;

        LASSERT(ols->ols_dlmlock == NULL);
        result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
                                descr->cld_mode, 0);
        if (result)
                CERROR("Pages for lockless lock %p were not purged(%d)\n",
                       ols, result);

        osc_lock_wake_waiters(env, osc, ols);
}

static const struct cl_lock_operations osc_lock_lockless_ops = {
        .clo_fini    = osc_lock_fini,
        .clo_enqueue = osc_lock_enqueue,
        .clo_cancel  = osc_lock_lockless_cancel,
        .clo_print   = osc_lock_print
};

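/**
 * If this write lock fully covers the region of the current write (or
 * mkwrite) IO, remember it as the IO's write lock in oi_write_osclock.
 */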
static void osc_lock_set_writer(const struct lu_env *env,
                                const struct cl_io *io,
                                struct cl_object *obj, struct osc_lock *oscl)
{
        struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
        pgoff_t io_start;
        pgoff_t io_end;

        if (!cl_object_same(io->ci_obj, obj))
                return;

        if (likely(io->ci_type == CIT_WRITE)) {
                io_start = cl_index(obj, io->u.ci_rw.crw_pos);
                io_end = cl_index(obj, io->u.ci_rw.crw_pos +
                                                io->u.ci_rw.crw_count - 1);
                if (cl_io_is_append(io)) {
                        io_start = 0;
                        io_end = CL_PAGE_EOF;
                }
        } else {
                LASSERT(cl_io_is_mkwrite(io));
                io_start = io_end = io->u.ci_fault.ft_index;
        }

        if (descr->cld_mode >= CLM_WRITE &&
            descr->cld_start <= io_start && descr->cld_end >= io_end) {
                struct osc_io *oio = osc_env_io(env);

                /* There must be only one lock to match the write region */
                LASSERT(oio->oi_write_osclock == NULL);
                oio->oi_write_osclock = oscl;
        }
}

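/**
 * Allocates and initializes the osc_lock slice for \a lock, translating
 * enqueue flags into LDLM flags and deciding up front whether the lock can be
 * lockless or is a glimpse request.
 */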
int osc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
                  const struct cl_io *io)
{
        struct osc_lock *oscl;
        __u32 enqflags = lock->cll_descr.cld_enq_flags;

        OBD_SLAB_ALLOC_PTR_GFP(oscl, osc_lock_kmem, GFP_NOFS);
        if (oscl == NULL)
                return -ENOMEM;

        oscl->ols_state = OLS_NEW;
        spin_lock_init(&oscl->ols_lock);
        INIT_LIST_HEAD(&oscl->ols_waiting_list);
        INIT_LIST_HEAD(&oscl->ols_wait_entry);
        INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);

        oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
        oscl->ols_agl = !!(enqflags & CEF_AGL);
        if (oscl->ols_agl)
                oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
        if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
                oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
                oscl->ols_glimpse = 1;
        }

        cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);

        if (!(enqflags & CEF_MUST))
                /* try to convert this lock to a lockless lock */
                osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
        if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
                oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;

        if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
                osc_lock_set_writer(env, io, obj, oscl);

        LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags "LPX64"\n",
                          lock, oscl, oscl->ols_flags);

        return 0;
}

/**
 * Finds an existing DLM lock covering the given \a index. Matching is
 * controlled by \a dap_flags (OSC_DAP_FL_TEST_LOCK, OSC_DAP_FL_CANCELING).
 */
struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
                                       struct osc_object *obj, pgoff_t index,
                                       enum osc_dap_flags dap_flags)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct ldlm_res_id *resname = &info->oti_resname;
        ldlm_policy_data_t *policy = &info->oti_policy;
        struct lustre_handle lockh;
        struct ldlm_lock *lock = NULL;
        ldlm_mode_t mode;
        __u64 flags;
        ENTRY;

        ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
        osc_index2policy(policy, osc2cl(obj), index, index);
        policy->l_extent.gid = LDLM_GID_ANY;

        flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
        if (dap_flags & OSC_DAP_FL_TEST_LOCK)
                flags |= LDLM_FL_TEST_LOCK;
        /*
         * It is fine to match any group lock since there could be only one
         * with a unique gid and it conflicts with all other lock modes too
         */
again:
        mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
                               flags, resname, LDLM_EXTENT, policy,
                               LCK_PR | LCK_PW | LCK_GROUP, &lockh,
                               dap_flags & OSC_DAP_FL_CANCELING);
        if (mode != 0) {
                lock = ldlm_handle2lock(&lockh);
                /* RACE: the lock is cancelled so let's try again */
                if (unlikely(lock == NULL))
                        goto again;
        }

        RETURN(lock);
}

/** @} osc */