/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_lock for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

/* fid_build_reg_res_name() */
#include <lustre_fid.h>
#include <lustre_osc.h>

#include "osc_internal.h"

/**
 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
 * pointer cannot be dereferenced, as lock is not protected from concurrent
 * reclaim. This function is a helper for osc_lock_invariant().
 */
static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(handle);
        if (lock != NULL)
                LDLM_LOCK_PUT(lock);
        return lock;
}

/**
 * Invariant that has to be true all of the time.
 */
static int osc_lock_invariant(struct osc_lock *ols)
{
        struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
        struct ldlm_lock *olock = ols->ols_dlmlock;
        int handle_used = lustre_handle_is_used(&ols->ols_handle);

        if (ergo(osc_lock_is_lockless(ols),
                 ols->ols_locklessable && ols->ols_dlmlock == NULL))
                return 1;

        /*
         * If all the following "ergo"s are true, return 1, otherwise 0
         */
        if (!ergo(olock != NULL, handle_used))
                return 0;

        if (!ergo(olock != NULL,
                  olock->l_handle.h_cookie == ols->ols_handle.cookie))
                return 0;

        if (!ergo(handle_used,
                  ergo(lock != NULL && olock != NULL, lock == olock) &&
                  ergo(lock == NULL, olock == NULL)))
                return 0;
        /*
         * Check that ->ols_handle and ->ols_dlmlock are consistent, but
         * take into account that they are set at different times.
         */
        if (!ergo(ols->ols_state == OLS_CANCELLED,
                  olock == NULL && !handle_used))
                return 0;
        /*
         * DLM lock is destroyed only after we have seen cancellation
         * ast.
         */
        if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
                  !ldlm_is_destroyed(olock)))
                return 0;

        if (!ergo(ols->ols_state == OLS_GRANTED,
                  olock != NULL &&
                  ldlm_is_granted(olock) &&
                  ols->ols_hold))
                return 0;
        return 1;
}
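
/*
 * Note (illustrative, not from the original source): "ergo" is the
 * standard libcfs implication helper, essentially
 *
 *      #define ergo(a, b) (!(a) || (b))
 *
 * i.e. "a implies b". For example, ergo(olock != NULL, handle_used)
 * above fails only when a DLM lock is attached while its handle is
 * unused, which is exactly the inconsistency being checked for.
 */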

/*****************************************************************************
 *
 * Lock operations.
 *
 */

void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);

        LINVRNT(osc_lock_invariant(ols));
        LASSERT(ols->ols_dlmlock == NULL);

        OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
EXPORT_SYMBOL(osc_lock_fini);

static void osc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
                                  union ldlm_policy_data *policy)
{
        const struct cl_lock_descr *d = &lock->cll_descr;

        osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
        policy->l_extent.gid = d->cld_gid;
}

/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue().
 *
 * Called under lock and resource spin-locks.
 */
void osc_lock_lvb_update(const struct lu_env *env,
                         struct osc_object *osc,
                         struct ldlm_lock *dlmlock,
                         struct ost_lvb *lvb)
{
        struct cl_object *obj = osc2cl(osc);
        struct lov_oinfo *oinfo = osc->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned valid, setkms = 0;

        valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
        if (lvb == NULL) {
                LASSERT(dlmlock != NULL);
                lvb = dlmlock->l_lvb_data;
        }
        cl_lvb2attr(attr, lvb);

        cl_object_attr_lock(obj);
        if (dlmlock != NULL) {
                __u64 size;

                check_res_locked(dlmlock->l_resource);

                LASSERT(lvb == dlmlock->l_lvb_data);
                size = lvb->lvb_size;

                /* Extend KMS up to the end of this lock and no further.
                 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
                if (size > dlmlock->l_policy_data.l_extent.end)
                        size = dlmlock->l_policy_data.l_extent.end + 1;
                if (size >= oinfo->loi_kms) {
                        valid |= CAT_KMS;
                        attr->cat_kms = size;
                        setkms = 1;
                }
                ldlm_lock_allow_match_locked(dlmlock);
        }

        /* The size should not be less than the kms */
        if (attr->cat_size < oinfo->loi_kms)
                attr->cat_size = oinfo->loi_kms;

        LDLM_DEBUG(dlmlock, "acquired size %llu, setting rss=%llu;%s "
                   "kms=%llu, end=%llu", lvb->lvb_size, attr->cat_size,
                   setkms ? "" : " leaving",
                   setkms ? attr->cat_kms : oinfo->loi_kms,
                   dlmlock ? dlmlock->l_policy_data.l_extent.end : -1ull);

        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}
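
/*
 * Worked example (illustrative, not from the original source) of the KMS
 * clamping above: a lock on bytes [0, 4095] has l_extent.end + 1 == 4096,
 * so an lvb_size of 10000 is clamped to 4096 before being compared with
 * loi_kms; the client may only extend its known minimum size as far as
 * the region its own lock covers.
 */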

static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
                             struct lustre_handle *lockh)
{
        struct osc_object *osc = cl2osc(oscl->ols_cl.cls_obj);
        struct ldlm_lock *dlmlock;

        dlmlock = ldlm_handle2lock_long(lockh, 0);
        LASSERT(dlmlock != NULL);

        /* lock reference taken by ldlm_handle2lock_long() is
         * owned by osc_lock and released in osc_lock_detach() */
        lu_ref_add_atomic(&dlmlock->l_reference, "osc_lock", oscl);
        oscl->ols_has_ref = 1;

        LASSERT(oscl->ols_dlmlock == NULL);
        oscl->ols_dlmlock = dlmlock;

        /* This may be a matched lock for a glimpse request; do not hold
         * a lock reference in that case. */
        if (!oscl->ols_glimpse) {
                /* hold a reference for a non-glimpse lock; it will be
                 * released in osc_lock_cancel() */
                lustre_handle_copy(&oscl->ols_handle, lockh);
                ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
                oscl->ols_hold = 1;
        }

        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
        if (ldlm_is_granted(dlmlock)) {
                struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

                /* extend the lock extent; otherwise it will cause problems
                 * when we decide whether to grant a lockless lock. */
                descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, ext->start);
                descr->cld_end = cl_index(descr->cld_obj, ext->end);
                descr->cld_gid = ext->gid;

                /* no lvb update for matched lock */
                if (!ldlm_is_lvb_cached(dlmlock)) {
                        LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                        LASSERT(osc == dlmlock->l_ast_data);
                        osc_lock_lvb_update(env, osc, dlmlock, NULL);
                        ldlm_set_lvb_cached(dlmlock);
                }
                LINVRNT(osc_lock_invariant(oscl));
        }
        unlock_res_and_lock(dlmlock);

        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
}
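
/*
 * Illustrative note (assumed from the states used in this file): the
 * usual osc_lock state progression is
 *
 *      OLS_NEW -> OLS_ENQUEUED -> OLS_UPCALL_RECEIVED -> OLS_GRANTED
 *
 * with OLS_CANCELLED reachable from any of them; osc_lock_granted()
 * above performs the final transition to OLS_GRANTED.
 */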

/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                           int errcode)
{
        struct osc_lock *oscl = cookie;
        struct cl_lock_slice *slice = &oscl->ols_cl;
        struct lu_env *env;
        int rc;

        env = cl_env_percpu_get();
        /* should never happen, similar to osc_ldlm_blocking_ast(). */
        LASSERT(!IS_ERR(env));

        rc = ldlm_error2errno(errcode);
        if (oscl->ols_state == OLS_ENQUEUED) {
                oscl->ols_state = OLS_UPCALL_RECEIVED;
        } else if (oscl->ols_state == OLS_CANCELLED) {
                rc = -EIO;
        } else {
                CERROR("Impossible state: %d\n", oscl->ols_state);
                LBUG();
        }

        if (rc == 0)
                osc_lock_granted(env, oscl, lockh);

        /* Error handling: some errors are tolerable. */
        if (oscl->ols_locklessable && rc == -EUSERS) {
                /* This is a tolerable error; turn this lock into
                 * a lockless lock. */
                osc_object_set_contended(cl2osc(slice->cls_obj));
                LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

                /* Change this lock into a lockless lock (no ldlm lock). */
                osc_lock_to_lockless(env, oscl, 1);
                oscl->ols_state = OLS_GRANTED;
                rc = 0;
        } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
                LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
                                    NULL, &oscl->ols_lvb);
                /* Hide the error. */
                rc = 0;
        } else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
                rc = -EAGAIN;
        }

        if (oscl->ols_owner != NULL)
                cl_sync_io_note(env, oscl->ols_owner, rc);
        cl_env_percpu_put(env);

        RETURN(rc);
}
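
/*
 * Illustrative sketch (hedged; the exact argument list follows the
 * osc_enqueue_base() call in osc_lock_enqueue() below): this upcall is
 * registered by passing it to osc_enqueue_base() with the osc_lock
 * itself as the cookie, roughly
 *
 *      osc_enqueue_base(exp, resname, &oscl->ols_flags, policy,
 *                       &oscl->ols_lvb, osc_lock_upcall, oscl,
 *                       &oscl->ols_einfo, PTLRPCD_SET, async,
 *                       oscl->ols_speculative);
 *
 * so it runs both when a cached local lock is matched and when the
 * server's reply arrives.
 */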

static int osc_lock_upcall_speculative(void *cookie,
                                       struct lustre_handle *lockh,
                                       int errcode)
{
        struct osc_object *osc = cookie;
        struct ldlm_lock *dlmlock;
        struct lu_env *env;
        __u16 refcheck;

        env = cl_env_get(&refcheck);
        LASSERT(!IS_ERR(env));

        if (errcode == ELDLM_LOCK_MATCHED)
                GOTO(out, errcode = ELDLM_OK);

        if (errcode != ELDLM_OK)
                GOTO(out, errcode);

        dlmlock = ldlm_handle2lock(lockh);
        LASSERT(dlmlock != NULL);

        lock_res_and_lock(dlmlock);
        LASSERT(ldlm_is_granted(dlmlock));

        /* there is no osc_lock associated with speculative locks,
         * thus no need to set LDLM_FL_LVB_CACHED */
        osc_lock_lvb_update(env, osc, dlmlock, NULL);

        unlock_res_and_lock(dlmlock);
        LDLM_LOCK_PUT(dlmlock);

out:
        cl_object_put(env, osc2cl(osc));
        cl_env_put(env, &refcheck);
        RETURN(ldlm_error2errno(errcode));
}

static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
                          enum cl_lock_mode mode, bool discard)
{
        struct lu_env *env;
        __u16 refcheck;
        int rc = 0;
        int rc2 = 0;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (mode == CLM_WRITE) {
                rc = osc_cache_writeback_range(env, obj, start, end, 1,
                                               discard);
                CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
                       obj, start, end, rc,
                       discard ? "discarded" : "written back");
                if (rc > 0)
                        rc = 0;
        }

        /*
         * Do not try to match other locks with CLM_WRITE since we already
         * wrote back or discarded the pages covered by this lock above.
         */
        rc2 = osc_lock_discard_pages(env, obj, start, end,
                                     mode == CLM_WRITE || discard);
        if (rc == 0 && rc2 < 0)
                rc = rc2;

        cl_env_put(env, &refcheck);
        RETURN(rc);
}

/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
                                 struct ldlm_lock *dlmlock,
                                 void *data, int flag)
{
        struct cl_object *obj = NULL;
        int result = 0;
        bool discard;
        enum cl_lock_mode mode = CLM_READ;

        LASSERT(flag == LDLM_CB_CANCELING);

        lock_res_and_lock(dlmlock);
        if (!ldlm_is_granted(dlmlock)) {
                dlmlock->l_ast_data = NULL;
                unlock_res_and_lock(dlmlock);
                RETURN(0);
        }

        discard = ldlm_is_discard_data(dlmlock);
        if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
                mode = CLM_WRITE;

        if (dlmlock->l_ast_data != NULL) {
                obj = osc2cl(dlmlock->l_ast_data);
                cl_object_get(obj);
        }

        unlock_res_and_lock(dlmlock);

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_CANCEL, 5);

        /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
         * the object has been destroyed. */
        if (obj != NULL) {
                struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;
                __u64 old_kms;

                /* Destroy pages covered by the extent of the DLM lock */
                result = osc_lock_flush(cl2osc(obj),
                                        cl_index(obj, extent->start),
                                        cl_index(obj, extent->end),
                                        mode, discard);

                /* losing a lock, update kms */
                lock_res_and_lock(dlmlock);
                /* clearing l_ast_data after flushing data,
                 * to let glimpse ast find the lock and the object */
                dlmlock->l_ast_data = NULL;
                cl_object_attr_lock(obj);
                /* Must get the value under the lock to avoid race. */
                old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
                /* Update the kms. Need to loop all granted locks.
                 * Not a problem for the client */
                attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

                cl_object_attr_update(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
                unlock_res_and_lock(dlmlock);

                cl_object_put(env, obj);
        }
        RETURN(result);
}

/**
 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
 * some other lock, or is canceled. This function is installed as an
 * ldlm_lock::l_blocking_ast() for client extent locks.
 *
 * Control flow is tricky, because ldlm uses the same call-back
 * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
 *
 * \param dlmlock lock for which ast occurred.
 *
 * \param new description of a conflicting lock in case of blocking ast.
 *
 * \param data value of dlmlock->l_ast_data
 *
 * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *             cancellation and blocking ast's.
 *
 * Possible use cases:
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
 *       a lock due to lock lru pressure, or an explicit user request to
 *       purge locks.
 *
 *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
 *       us that dlmlock conflicts with another lock that some client is
 *       enqueuing. Lock is canceled.
 *
 *           - cl_lock_cancel() is called. osc_lock_cancel() calls
 *             ldlm_cli_cancel() that calls
 *
 *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 *
 *             recursively entering osc_ldlm_blocking_ast().
 *
 *     - client cancels the lock voluntarily (e.g., as a part of early
 *       cancellation):
 *
 *              cl_lock_cancel()->
 *                osc_lock_cancel()->
 *                  ldlm_cli_cancel()->
 *                    dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
 */
static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                                 struct ldlm_lock_desc *new, void *data,
                                 int flag)
{
        int result = 0;

        switch (flag) {
        case LDLM_CB_BLOCKING: {
                struct lustre_handle lockh;

                ldlm_lock2handle(dlmlock, &lockh);
                result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                if (result == -ENODATA)
                        result = 0;
                break;
        }
        case LDLM_CB_CANCELING: {
                struct lu_env *env;
                __u16 refcheck;

                /*
                 * This can be called in the context of outer IO, e.g.,
                 *
                 *      osc_enqueue_base()->...
                 *        ->ldlm_prep_elc_req()->...
                 *          ->ldlm_cancel_callback()->...
                 *            ->osc_ldlm_blocking_ast()
                 *
                 * new environment has to be created to not corrupt outer
                 * context.
                 */
                env = cl_env_get(&refcheck);
                if (IS_ERR(env)) {
                        result = PTR_ERR(env);
                        break;
                }

                result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
                cl_env_put(env, &refcheck);
                break;
        }
        default:
                LBUG();
        }
        RETURN(result);
}

int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
        struct ptlrpc_request *req = data;
        struct lu_env *env;
        struct ost_lvb *lvb;
        struct req_capsule *cap;
        struct cl_object *obj = NULL;
        struct ldlm_resource *res = dlmlock->l_resource;
        struct ldlm_match_data matchdata = { 0 };
        union ldlm_policy_data policy;
        enum ldlm_mode mode = LCK_PW | LCK_GROUP | LCK_PR;
        int result;
        __u16 refcheck;

        LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                GOTO(out, result = PTR_ERR(env));

        policy.l_extent.start = 0;
        policy.l_extent.end = LUSTRE_EOF;

        matchdata.lmd_mode = &mode;
        matchdata.lmd_policy = &policy;
        matchdata.lmd_flags = LDLM_FL_TEST_LOCK | LDLM_FL_CBPENDING;
        matchdata.lmd_match = LDLM_MATCH_UNREF | LDLM_MATCH_AST_ANY;

        LDLM_LOCK_GET(dlmlock);

        /* If any dlmlock has l_ast_data set, we must find it or we risk
         * missing a size update done under a different lock.
         */
        lock_res_and_lock(dlmlock);
        if (dlmlock->l_ast_data) {
                obj = osc2cl(dlmlock->l_ast_data);
                cl_object_get(obj);
        }
        unlock_res_and_lock(dlmlock);
        LDLM_LOCK_RELEASE(dlmlock);

        if (obj == NULL && res->lr_type == LDLM_EXTENT) {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_SIZE_DATA))
                        GOTO(out, result = -ELDLM_NO_LOCK_DATA);

                lock_res(res);
                dlmlock = search_itree(res, &matchdata);
                if (dlmlock != NULL) {
                        if (dlmlock->l_ast_data != NULL) {
                                obj = osc2cl(dlmlock->l_ast_data);
                                cl_object_get(obj);
                        }
                        LDLM_LOCK_RELEASE(dlmlock);
                }
                unlock_res(res);
        }

        if (obj != NULL) {
                /* Do not grab the mutex of cl_lock for glimpse.
                 * See LU-1274 for details.
                 * BTW, it's okay for cl_lock to be cancelled during
                 * this period because server can handle this race.
                 * See ldlm_server_glimpse_ast() for details.
                 * cl_lock_mutex_get(env, lock); */
                cap = &req->rq_pill;
                req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
                req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
                                     sizeof(*lvb));
                result = req_capsule_server_pack(cap);
                if (result == 0) {
                        lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
                        result = cl_object_glimpse(env, obj, lvb);
                }
                if (!exp_connect_lvb_type(req->rq_export))
                        req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
                                           sizeof(struct ost_lvb_v1),
                                           RCL_SERVER);
                cl_object_put(env, obj);
        } else {
                /*
                 * These errors are normal races, so we don't want to
                 * fill the console with messages by calling
                 * ptlrpc_error().
                 */
                lustre_pack_reply(req, 1, NULL, NULL);
                result = -ELDLM_NO_LOCK_DATA;
        }
        cl_env_put(env, &refcheck);

out:
        req->rq_status = result;
        RETURN(result);
}
EXPORT_SYMBOL(osc_ldlm_glimpse_ast);

static bool weigh_cb(const struct lu_env *env, struct cl_io *io,
                     struct osc_page *ops, void *cbdata)
{
        struct cl_page *page = ops->ops_cl.cpl_page;

        if (cl_page_is_vmlocked(env, page) || PageDirty(page->cp_vmpage) ||
            PageWriteback(page->cp_vmpage))
                return false;

        *(pgoff_t *)cbdata = osc_index(ops) + 1;
        return true;
}

static unsigned long osc_lock_weight(const struct lu_env *env,
                                     struct osc_object *oscobj,
                                     loff_t start, loff_t end)
{
        struct cl_io *io = osc_env_thread_io(env);
        struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
        pgoff_t page_index;
        int result;

        io->ci_obj = obj;
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                return 1;

        page_index = cl_index(obj, start);

        if (!osc_page_gang_lookup(env, io, oscobj,
                                  page_index, cl_index(obj, end),
                                  weigh_cb, (void *)&page_index))
                result = 1;
        cl_io_fini(env, io);

        return result;
}
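
/*
 * Illustrative summary (not from the original source): osc_lock_weight()
 * returns 0 when the gang lookup walks the whole range without weigh_cb()
 * aborting, i.e. no vmlocked, dirty or writeback pages were found and the
 * lock is cheap to cancel; it returns non-zero as soon as a page in use
 * is seen.
 */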

/**
 * Get the weight of dlm lock for early cancellation.
 */
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
        struct lu_env *env;
        struct osc_object *obj;
        struct osc_lock *oscl;
        unsigned long weight;
        bool found = false;
        __u16 refcheck;

        /*
         * osc_ldlm_weigh_ast has a complex context since it might be called
         * because of lock canceling, or from user input. We have to make
         * a new environment for it. It is probably safe to use the upper
         * context because cl_lock_put() doesn't modify environment
         * variables. But just in case ..
         */
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                /* Mostly due to lack of memory, do not eliminate this lock */
                RETURN(1);

        LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT ||
                dlmlock->l_resource->lr_type == LDLM_IBITS);

        lock_res_and_lock(dlmlock);
        obj = dlmlock->l_ast_data;
        if (obj)
                cl_object_get(osc2cl(obj));
        unlock_res_and_lock(dlmlock);

        if (obj == NULL)
                GOTO(out, weight = 0);

        spin_lock(&obj->oo_ol_spin);
        list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
                if (oscl->ols_dlmlock == dlmlock) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&obj->oo_ol_spin);
        if (found) {
                /*
                 * If the lock is being used by an IO, definitely do not
                 * cancel it.
                 */
                GOTO(out, weight = 1);
        }

        if (dlmlock->l_resource->lr_type == LDLM_EXTENT)
                weight = osc_lock_weight(env, obj,
                                         dlmlock->l_policy_data.l_extent.start,
                                         dlmlock->l_policy_data.l_extent.end);
        else if (ldlm_has_dom(dlmlock))
                weight = osc_lock_weight(env, obj, 0, OBD_OBJECT_EOF);
        /* The DOM bit can be cancelled at any time; in that case, we know
         * there are no pages, so just return a weight of 0.
         */
        else
                weight = 0;

out:
        if (obj)
                cl_object_put(env, osc2cl(obj));

        cl_env_put(env, &refcheck);
        RETURN(weight);
}
EXPORT_SYMBOL(osc_ldlm_weigh_ast);

static void osc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *lock,
                                 struct osc_object *osc,
                                 struct ldlm_enqueue_info *einfo)
{
        einfo->ei_type = LDLM_EXTENT;
        einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
        einfo->ei_cb_bl = osc_ldlm_blocking_ast;
        einfo->ei_cb_cp = ldlm_completion_ast;
        einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
        einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
}
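
/*
 * Illustrative note: the callbacks wired up above are the functions
 * defined earlier in this file. Once the lock is granted, ldlm stores
 * einfo->ei_cbdata in dlmlock->l_ast_data, which is what
 * osc_dlm_blocking_ast0() and osc_ldlm_glimpse_ast() later read back via
 * osc2cl(dlmlock->l_ast_data).
 */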

/**
 * Determine if the lock should be converted into a lockless lock.
 *
 * Factors considered:
 * - whether the lock has an explicit requirement for a non-lockless lock;
 * - the io lock request type, ci_lockreq;
 * - whether the enqueue rpc should be sent to the ost to make the further
 *   decision;
 * - special treatment of truncate lockless locks.
 *
 * Additional policy can be implemented here, e.g., never do lockless io
 * for large extents.
 */
void osc_lock_to_lockless(const struct lu_env *env,
                          struct osc_lock *ols, int force)
{
        struct cl_lock_slice *slice = &ols->ols_cl;
        struct osc_io *oio = osc_env_io(env);
        struct cl_io *io = oio->oi_cl.cis_io;
        struct cl_object *obj = slice->cls_obj;
        struct osc_object *oob = cl2osc(obj);
        const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
        struct obd_connect_data *ocd;

        LASSERT(ols->ols_state == OLS_NEW ||
                ols->ols_state == OLS_UPCALL_RECEIVED);

        if (force) {
                ols->ols_locklessable = 1;
                slice->cls_ops = ols->ols_lockless_ops;
        } else {
                LASSERT(io->ci_lockreq == CILR_MANDATORY ||
                        io->ci_lockreq == CILR_MAYBE ||
                        io->ci_lockreq == CILR_NEVER);

                ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
                ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
                                        (io->ci_lockreq == CILR_MAYBE) &&
                                        (ocd->ocd_connect_flags &
                                         OBD_CONNECT_SRVLOCK);
                if (io->ci_lockreq == CILR_NEVER ||
                    /* lockless IO */
                    (ols->ols_locklessable && osc_object_is_contended(oob)) ||
                    /* lockless truncate */
                    (cl_io_is_trunc(io) && osd->od_lockless_truncate &&
                     (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK))) {
                        ols->ols_locklessable = 1;
                        slice->cls_ops = ols->ols_lockless_ops;
                }
        }
        LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
EXPORT_SYMBOL(osc_lock_to_lockless);

static bool osc_lock_compatible(const struct osc_lock *qing,
                                const struct osc_lock *qed)
{
        struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
        struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;

        if (qed->ols_glimpse || qed->ols_speculative)
                return true;

        if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
                return true;

        if (qed->ols_state < OLS_GRANTED)
                return true;

        if (qed_descr->cld_mode >= qing_descr->cld_mode &&
            qed_descr->cld_start <= qing_descr->cld_start &&
            qed_descr->cld_end >= qing_descr->cld_end)
                return true;

        return false;
}
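
/*
 * Illustrative examples (not from the original source) of the rules
 * above: a granted CLM_WRITE lock on [0, EOF] is compatible with a
 * CLM_WRITE enqueue of [10, 20] (mode and extent both cover it); two
 * CLM_READ locks are always compatible; a glimpse or speculative qed
 * lock never forces the enqueuing lock to wait.
 */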

void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
                           struct osc_lock *oscl)
{
        spin_lock(&osc->oo_ol_spin);
        list_del_init(&oscl->ols_nextlock_oscobj);
        spin_unlock(&osc->oo_ol_spin);

        spin_lock(&oscl->ols_lock);
        while (!list_empty(&oscl->ols_waiting_list)) {
                struct osc_lock *scan;

                scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
                                  ols_wait_entry);
                list_del_init(&scan->ols_wait_entry);

                cl_sync_io_note(env, scan->ols_owner, 0);
        }
        spin_unlock(&oscl->ols_lock);
}
EXPORT_SYMBOL(osc_lock_wake_waiters);

int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
                          struct osc_lock *oscl)
{
        struct osc_lock *tmp_oscl;
        struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
        struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
        int rc = 0;

        spin_lock(&obj->oo_ol_spin);
        list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);

restart:
        list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
                            ols_nextlock_oscobj) {
                struct cl_lock_descr *descr;

                if (tmp_oscl == oscl)
                        break;

                descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
                if (descr->cld_start > need->cld_end ||
                    descr->cld_end < need->cld_start)
                        continue;

                /* We're not supposed to give up a group lock */
                if (descr->cld_mode == CLM_GROUP)
                        break;

                if (!osc_lock_is_lockless(oscl) &&
                    osc_lock_compatible(oscl, tmp_oscl))
                        continue;

                /* wait for the conflicting lock to be canceled */
                cl_sync_io_init(waiter, 1);
                oscl->ols_owner = waiter;

                spin_lock(&tmp_oscl->ols_lock);
                /* add oscl into tmp's ols_waiting list */
                list_add_tail(&oscl->ols_wait_entry,
                              &tmp_oscl->ols_waiting_list);
                spin_unlock(&tmp_oscl->ols_lock);

                spin_unlock(&obj->oo_ol_spin);
                rc = cl_sync_io_wait(env, waiter, 0);
                spin_lock(&obj->oo_ol_spin);

                if (rc < 0)
                        break;

                oscl->ols_owner = NULL;
                goto restart;
        }
        spin_unlock(&obj->oo_ol_spin);

        RETURN(rc);
}
EXPORT_SYMBOL(osc_lock_enqueue_wait);
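
/*
 * Illustrative summary (not from the original source) of the protocol
 * above: the enqueuing lock registers itself on a conflicting lock's
 * ols_waiting_list and blocks in cl_sync_io_wait(); when the conflicting
 * lock goes away, osc_lock_wake_waiters() completes each waiter through
 * cl_sync_io_note(), and the scan restarts until no conflict remains.
 */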

/**
 * Implementation of cl_lock_operations::clo_enqueue() method for osc
 * layer. This initiates ldlm enqueue:
 *
 *     - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 *     - calls osc_enqueue_base() to do actual enqueue.
 *
 * osc_enqueue_base() is supplied with an upcall function that is executed
 * when lock is received either after a local cached ldlm lock is matched, or
 * when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
static int osc_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *unused, struct cl_sync_io *anchor)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(slice->cls_obj);
        struct osc_lock *oscl = cl2osc_lock(slice);
        struct obd_export *exp = osc_export(osc);
        struct cl_lock *lock = slice->cls_lock;
        struct ldlm_res_id *resname = &info->oti_resname;
        union ldlm_policy_data *policy = &info->oti_policy;
        osc_enqueue_upcall_f upcall = osc_lock_upcall;
        void *cookie = oscl;
        bool async = false;
        int result;

        LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
                 "lock = %p, ols = %p\n", lock, oscl);

        if (oscl->ols_state == OLS_GRANTED)
                RETURN(0);

        if ((oscl->ols_flags & LDLM_FL_NO_EXPANSION) &&
            !exp_connect_lockahead(exp)) {
                result = -EOPNOTSUPP;
                CERROR("%s: server does not support lockahead/locknoexpand: rc = %d\n",
                       exp->exp_obd->obd_name, result);
                RETURN(result);
        }

        if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
                GOTO(enqueue_base, 0);

        /* For glimpse and/or speculative locks, do not wait for a reply
         * from the server on the LDLM request */
        if (oscl->ols_glimpse || oscl->ols_speculative) {
                /* Speculative and glimpse locks do not have an anchor */
                LASSERT(equi(oscl->ols_speculative, anchor == NULL));
                async = true;
                GOTO(enqueue_base, 0);
        }

        result = osc_lock_enqueue_wait(env, osc, oscl);
        if (result < 0)
                GOTO(out, result);

        /* we can grant a lockless lock right after all conflicting locks
         * are canceled */
        if (osc_lock_is_lockless(oscl)) {
                oscl->ols_state = OLS_GRANTED;
                oio->oi_lockless = 1;
                RETURN(0);
        }

enqueue_base:
        oscl->ols_state = OLS_ENQUEUED;
        if (anchor != NULL) {
                atomic_inc(&anchor->csi_sync_nr);
                oscl->ols_owner = anchor;
        }

        /**
         * DLM lock's ast data must be osc_object;
         * if glimpse or speculative lock, async of osc_enqueue_base()
         * must be true.
         *
         * For non-speculative locks:
         * DLM's enqueue callback is set to osc_lock_upcall() with cookie as
         * osc_lock.
         * For speculative locks:
         * osc_lock_upcall_speculative & cookie is the osc object, since
         * there is no osc_lock
         */
        ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
        osc_lock_build_policy(env, lock, policy);
        if (oscl->ols_speculative) {
                oscl->ols_einfo.ei_cbdata = NULL;
                /* hold a reference for callback */
                cl_object_get(osc2cl(osc));
                upcall = osc_lock_upcall_speculative;
                cookie = osc;
        }

        result = osc_enqueue_base(exp, resname, &oscl->ols_flags,
                                  policy, &oscl->ols_lvb,
                                  upcall, cookie,
                                  &oscl->ols_einfo, PTLRPCD_SET, async,
                                  oscl->ols_speculative);
        if (result == 0) {
                if (osc_lock_is_lockless(oscl)) {
                        oio->oi_lockless = 1;
                } else if (!async) {
                        LASSERT(oscl->ols_state == OLS_GRANTED);
                        LASSERT(oscl->ols_hold);
                        LASSERT(oscl->ols_dlmlock != NULL);
                }
        } else if (oscl->ols_speculative) {
                cl_object_put(env, osc2cl(osc));
                if (oscl->ols_glimpse) {
                        /* hide error for AGL request */
                        result = 0;
                }
        }

out:
        if (result < 0) {
                oscl->ols_state = OLS_CANCELLED;
                osc_lock_wake_waiters(env, osc, oscl);

                if (anchor != NULL)
                        cl_sync_io_note(env, anchor, result);
        }
        RETURN(result);
}

/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        dlmlock = olck->ols_dlmlock;
        if (dlmlock == NULL)
                return;

        if (olck->ols_hold) {
                olck->ols_hold = 0;
                ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
                olck->ols_handle.cookie = 0ULL;
        }

        olck->ols_dlmlock = NULL;

        /* release a reference taken in osc_lock_upcall(). */
        LASSERT(olck->ols_has_ref);
        lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
        LDLM_LOCK_RELEASE(dlmlock);
        olck->ols_has_ref = 0;
}

/**
 * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
 * called (as part of cl_lock_cancel()) when a lock is canceled either
 * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
 * conflict with some other lock somewhere in the cluster. This function
 * does the following:
 *
 *  - invalidates all pages protected by this lock (after sending dirty
 *    ones to the server, as necessary);
 *
 *  - decref's underlying ldlm lock;
 *
 *  - cancels ldlm lock (ldlm_cli_cancel()).
 */
void osc_lock_cancel(const struct lu_env *env,
                     const struct cl_lock_slice *slice)
{
        struct osc_object *obj = cl2osc(slice->cls_obj);
        struct osc_lock *oscl = cl2osc_lock(slice);

        LINVRNT(osc_lock_invariant(oscl));

        osc_lock_detach(env, oscl);
        oscl->ols_state = OLS_CANCELLED;
        oscl->ols_flags &= ~LDLM_FL_LVB_READY;

        osc_lock_wake_waiters(env, obj, oscl);
}
EXPORT_SYMBOL(osc_lock_cancel);

int osc_lock_print(const struct lu_env *env, void *cookie,
                   lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct osc_lock *lock = cl2osc_lock(slice);

        (*p)(env, cookie, "%p %#llx %#llx %d %p ",
             lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
             lock->ols_state, lock->ols_owner);
        osc_lvb_print(env, cookie, p, &lock->ols_lvb);
        return 0;
}
EXPORT_SYMBOL(osc_lock_print);

static const struct cl_lock_operations osc_lock_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = osc_lock_enqueue,
        .clo_cancel = osc_lock_cancel,
        .clo_print = osc_lock_print,
};

static void osc_lock_lockless_cancel(const struct lu_env *env,
                                     const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        struct osc_object *osc = cl2osc(slice->cls_obj);
        struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
        int result;

        LASSERT(ols->ols_dlmlock == NULL);
        result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
                                descr->cld_mode, false);
        if (result)
                CERROR("Pages for lockless lock %p were not purged(%d)\n",
                       slice->cls_lock, result);

        osc_lock_wake_waiters(env, osc, ols);
}

static const struct cl_lock_operations osc_lock_lockless_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = osc_lock_enqueue,
        .clo_cancel = osc_lock_lockless_cancel,
        .clo_print = osc_lock_print
};
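
/*
 * Note (illustrative): the two operation tables above differ only in
 * .clo_cancel. A lockless lock has no ldlm lock to detach, so its
 * cancellation reduces to flushing or discarding the pages it covers and
 * waking any enqueue waiters.
 */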

void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
                         struct cl_object *obj, struct osc_lock *oscl)
{
        struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
        pgoff_t io_start;
        pgoff_t io_end;

        if (!cl_object_same(io->ci_obj, obj))
                return;

        if (likely(io->ci_type == CIT_WRITE)) {
                io_start = cl_index(obj, io->u.ci_rw.crw_pos);
                io_end = cl_index(obj, io->u.ci_rw.crw_pos +
                                  io->u.ci_rw.crw_count - 1);
        } else {
                LASSERT(cl_io_is_mkwrite(io));
                io_start = io_end = io->u.ci_fault.ft_index;
        }

        if (descr->cld_mode >= CLM_WRITE &&
            (cl_io_is_append(io) ||
             (descr->cld_start <= io_start && descr->cld_end >= io_end))) {
                struct osc_io *oio = osc_env_io(env);

                /* There must be only one lock to match the write region */
                LASSERT(oio->oi_write_osclock == NULL);
                oio->oi_write_osclock = oscl;
        }
}
EXPORT_SYMBOL(osc_lock_set_writer);

int osc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
                  const struct cl_io *io)
{
        struct osc_lock *oscl;
        __u32 enqflags = lock->cll_descr.cld_enq_flags;

        OBD_SLAB_ALLOC_PTR_GFP(oscl, osc_lock_kmem, GFP_NOFS);
        if (oscl == NULL)
                return -ENOMEM;

        oscl->ols_state = OLS_NEW;
        spin_lock_init(&oscl->ols_lock);
        INIT_LIST_HEAD(&oscl->ols_waiting_list);
        INIT_LIST_HEAD(&oscl->ols_wait_entry);
        INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
        oscl->ols_lockless_ops = &osc_lock_lockless_ops;

        /* Speculative lock requests must be either no_expand or glimpse
         * requests (CEF_GLIMPSE). Non-glimpse no_expand speculative extent
         * locks will break ofd_intent_cb (see the comment there). */
        LASSERT(ergo((enqflags & CEF_SPECULATIVE) != 0,
                     (enqflags & (CEF_LOCK_NO_EXPAND | CEF_GLIMPSE)) != 0));

        oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
        oscl->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
        if (lock->cll_descr.cld_mode == CLM_GROUP)
                oscl->ols_flags |= LDLM_FL_ATOMIC_CB;

        if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
                oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
                oscl->ols_glimpse = 1;
        }
        if (io->ci_ndelay && cl_object_same(io->ci_obj, obj))
                oscl->ols_flags |= LDLM_FL_NDELAY;
        osc_lock_build_einfo(env, lock, cl2osc(obj), &oscl->ols_einfo);

        cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);

        if (!(enqflags & CEF_MUST))
                /* try to convert this lock to a lockless lock */
                osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
        if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
                oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;

        if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
                osc_lock_set_writer(env, io, obj, oscl);

        LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %#llx",
                          lock, oscl, oscl->ols_flags);

        return 0;
}
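
/*
 * Illustrative sketch (assumed typical call paths, not shown in this
 * file): the cl_lock framework reaches the methods above through the
 * slice added by osc_lock_init(), roughly
 *
 *      cl_lock_init()    -> ... -> osc_lock_init()
 *      cl_lock_enqueue() -> ... -> osc_lock_enqueue()
 *      cl_lock_cancel()  -> ... -> osc_lock_cancel()
 */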

/**
 * Finds an existing lock covering a given index and optionally different from
 * a given \a except lock.
 */
struct ldlm_lock *osc_obj_dlmlock_at_pgoff(const struct lu_env *env,
                                           struct osc_object *obj,
                                           pgoff_t index,
                                           enum osc_dap_flags dap_flags)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct ldlm_res_id *resname = &info->oti_resname;
        union ldlm_policy_data *policy = &info->oti_policy;
        struct lustre_handle lockh;
        struct ldlm_lock *lock = NULL;
        enum ldlm_mode mode;
        __u64 flags;
        enum ldlm_match_flags match_flags = 0;

        ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
        osc_index2policy(policy, osc2cl(obj), index, index);
        policy->l_extent.gid = LDLM_GID_ANY;

        flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
        if (dap_flags & OSC_DAP_FL_TEST_LOCK)
                flags |= LDLM_FL_TEST_LOCK;

        if (dap_flags & OSC_DAP_FL_AST)
                match_flags |= LDLM_MATCH_AST;

        if (dap_flags & OSC_DAP_FL_CANCELING)
                match_flags |= LDLM_MATCH_UNREF;

        if (dap_flags & OSC_DAP_FL_RIGHT)
                match_flags |= LDLM_MATCH_RIGHT;

        /*
         * It is fine to match any group lock since there could be only one
         * with a unique gid and it conflicts with all other lock modes too.
         */
again:
        mode = osc_match_base(env, osc_export(obj), resname, LDLM_EXTENT,
                              policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
                              obj, &lockh, match_flags);
        if (mode != 0) {
                lock = ldlm_handle2lock(&lockh);
                /* RACE: the lock is cancelled so let's try again */
                if (unlikely(lock == NULL))
                        goto again;
        }

        return lock;
}