1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2017, Intel Corporation.
8 * This file is part of Lustre, http://www.lustre.org/
10 * Implementation of cl_device, cl_req for MDC layer.
12 * Author: Mikhail Pershin <mike.pershin@intel.com>
15 #define DEBUG_SUBSYSTEM S_MDC
17 #include <obd_class.h>
18 #include <lustre_osc.h>
19 #include <linux/falloc.h>
20 #include <uapi/linux/lustre/lustre_param.h>
22 #include "mdc_internal.h"
24 static void mdc_lock_build_policy(const struct lu_env *env,
25 const struct cl_lock *lock,
26 union ldlm_policy_data *policy)
28 memset(policy, 0, sizeof *policy);
29 policy->l_inodebits.bits = MDS_INODELOCK_DOM;
31 policy->l_inodebits.li_gid = lock->cll_descr.cld_gid;
35 int mdc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
37 return osc_ldlm_glimpse_ast(dlmlock, data);
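/* Fill the ldlm_enqueue_info for a DOM enqueue: IBITS lock type, the ldlm mode
 * converted from the cl_lock mode, the MDC blocking/completion/glimpse
 * callbacks, and the osc_object as AST data.
 */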
40 static void mdc_lock_build_einfo(const struct lu_env *env,
41 const struct cl_lock *lock,
42 struct osc_object *osc,
43 struct ldlm_enqueue_info *einfo)
45 einfo->ei_type = LDLM_IBITS;
46 einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
47 einfo->ei_cb_bl = mdc_ldlm_blocking_ast;
48 einfo->ei_cb_cp = ldlm_completion_ast;
49 einfo->ei_cb_gl = mdc_ldlm_glimpse_ast;
50 einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
51 einfo->ei_req_slot = 1;
54 static void mdc_lock_lvb_update(const struct lu_env *env,
55 struct osc_object *osc,
56 struct ldlm_lock *dlmlock,
59 static int mdc_set_dom_lock_data(struct ldlm_lock *lock, void *data)
63 LASSERT(lock != NULL);
64 LASSERT(lock->l_glimpse_ast == mdc_ldlm_glimpse_ast);
66 lock_res_and_lock(lock);
68 if (lock->l_ast_data == NULL)
69 lock->l_ast_data = data;
70 if (lock->l_ast_data == data)
73 unlock_res_and_lock(lock);
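/* Look for a cached DOM IBITS lock in the client namespace. On a real
 * (non-test) match the lock is bound to the osc_object and its LVB attributes
 * are cached once per lock.
 */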
78 static int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
79 struct ldlm_res_id *res_id, enum ldlm_type type,
80 union ldlm_policy_data *policy,
81 enum ldlm_mode mode, __u64 *flags,
82 struct osc_object *obj,
83 enum ldlm_match_flags match_flags,
84 struct lustre_handle *lockh)
86 struct obd_device *obd = exp->exp_obd;
87 __u64 lflags = *flags;
92 rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
93 res_id, type, policy, mode, match_flags, lockh);
94 if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
98 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
100 LASSERT(lock != NULL);
101 if (mdc_set_dom_lock_data(lock, obj)) {
102 lock_res_and_lock(lock);
103 if (!(lock->l_flags & LDLM_FL_LVB_CACHED)) {
104 LASSERT(lock->l_ast_data == obj);
105 mdc_lock_lvb_update(env, obj, lock, NULL);
106 lock->l_flags |= LDLM_FL_LVB_CACHED;
108 unlock_res_and_lock(lock);
110 ldlm_lock_decref(lockh, rc);
119 * Finds an existing lock covering the page with the given index.
120 * A copy of osc_obj_dlmlock_at_pgoff(), but for a DoM IBITS lock.
122 static struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
123 struct osc_object *obj,
125 enum osc_dap_flags dap_flags)
127 struct osc_thread_info *info = osc_env_info(env);
128 struct ldlm_res_id *resname = &info->oti_resname;
129 union ldlm_policy_data *policy = &info->oti_policy;
130 struct lustre_handle lockh;
131 struct ldlm_lock *lock = NULL;
134 enum ldlm_match_flags match_flags = 0;
138 fid_build_reg_res_name(lu_object_fid(osc2lu(obj)), resname);
139 mdc_lock_build_policy(env, NULL, policy);
140 policy->l_inodebits.li_gid = LDLM_GID_ANY;
142 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
143 if (dap_flags & OSC_DAP_FL_TEST_LOCK)
144 flags |= LDLM_FL_TEST_LOCK;
146 if (dap_flags & OSC_DAP_FL_AST)
147 match_flags |= LDLM_MATCH_AST;
149 if (dap_flags & OSC_DAP_FL_CANCELING)
150 match_flags |= LDLM_MATCH_UNREF;
153 /* Next, search for already existing extent locks that will cover us */
154 /* If we're trying to read, we also search for an existing PW lock. The
155 * VFS and page cache already protect us locally, so lots of readers/
156 * writers can share a single PW lock. */
157 mode = mdc_dom_lock_match(env, osc_export(obj), resname, LDLM_IBITS,
158 policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
159 obj, match_flags, &lockh);
161 lock = ldlm_handle2lock(&lockh);
162 /* RACE: the lock is cancelled so let's try again */
163 if (unlikely(lock == NULL))
171 * Check if page @page is covered by an extra lock or discard it.
173 static bool mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
174 void **pvec, int count, void *cbdata)
176 struct osc_thread_info *info = osc_env_info(env);
177 struct osc_object *osc = cbdata;
181 for (i = 0; i < count; i++) {
182 struct osc_page *ops = pvec[i];
184 index = osc_index(ops);
185 if (index >= info->oti_fn_index) {
186 struct ldlm_lock *tmp;
187 struct cl_page *page = ops->ops_cl.cpl_page;
189 /* refresh non-overlapped index */
190 tmp = mdc_dlmlock_at_pgoff(env, osc, index,
191 OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_AST);
193 info->oti_fn_index = CL_PAGE_EOF;
195 } else if (cl_page_own(env, io, page) == 0) {
196 /* discard the page */
197 cl_page_discard(env, io, page);
198 cl_page_disown(env, io, page);
200 if (page->cp_type != CPT_TRANSIENT)
201 LASSERT(page->cp_state == CPS_FREEING);
205 info->oti_next_index = index + 1;
211 * Discard pages protected by the given lock. This function traverses the radix
212 * tree to find all covering pages and discards them. If a page is covered
213 * by other locks, it should remain in cache.
215 * If an error happens at any step, the process continues anyway (the reasoning
216 * behind this being that lock cancellation cannot be delayed indefinitely).
218 static int mdc_lock_discard_pages(const struct lu_env *env,
219 struct osc_object *osc,
220 pgoff_t start, pgoff_t end,
223 struct osc_thread_info *info = osc_env_info(env);
224 struct cl_io *io = &info->oti_io;
225 osc_page_gang_cbt cb;
230 io->ci_obj = cl_object_top(osc2cl(osc));
231 io->ci_ignore_layout = 1;
232 io->u.ci_misc.lm_next_rpc_time = 0;
234 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
238 cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
239 info->oti_fn_index = info->oti_next_index = start;
241 osc_page_gang_lookup(env, io, osc, info->oti_next_index,
242 end, cb, (void *)osc);
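/* Flush the page range covered by a lock: write back dirty pages for a
 * CLM_WRITE lock, then discard pages that are no longer protected by any lock
 * (or all of them when @discard is set).
 */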
248 static int mdc_lock_flush(const struct lu_env *env, struct osc_object *obj,
249 pgoff_t start, pgoff_t end, enum cl_lock_mode mode,
257 if (mode == CLM_WRITE) {
258 result = osc_cache_writeback_range(env, obj, start, end, 1,
260 CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
261 obj, start, end, result,
262 discard ? "discarded" : "written back");
267 /* Avoid lock matching with CLM_WRITE, there can be no other locks */
268 rc = mdc_lock_discard_pages(env, obj, start, end,
269 mode == CLM_WRITE || discard);
270 if (result == 0 && rc < 0)
276 static void mdc_lock_lockless_cancel(const struct lu_env *env,
277 const struct cl_lock_slice *slice)
279 struct osc_lock *ols = cl2osc_lock(slice);
280 struct osc_object *osc = cl2osc(slice->cls_obj);
281 struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
284 LASSERT(ols->ols_dlmlock == NULL);
285 rc = mdc_lock_flush(env, osc, descr->cld_start, descr->cld_end,
288 CERROR("Pages for lockless lock %p were not purged(%d)\n",
291 osc_lock_wake_waiters(env, osc, ols);
295 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
296 * and ldlm_lock caches.
298 static int mdc_dlm_canceling(const struct lu_env *env,
299 struct ldlm_lock *dlmlock)
301 struct cl_object *obj = NULL;
304 enum cl_lock_mode mode = CLM_READ;
308 lock_res_and_lock(dlmlock);
309 if (!ldlm_is_granted(dlmlock)) {
310 dlmlock->l_ast_data = NULL;
311 unlock_res_and_lock(dlmlock);
315 discard = (dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
316 if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
319 if (dlmlock->l_ast_data != NULL) {
320 obj = osc2cl(dlmlock->l_ast_data);
323 unlock_res_and_lock(dlmlock);
325 /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
326 * the object has been destroyed. */
328 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
330 /* Destroy pages covered by the extent of the DLM lock */
331 result = mdc_lock_flush(env, cl2osc(obj), 0,
332 CL_PAGE_EOF, mode, discard);
333 /* Losing a lock, set KMS to 0.
334  * NB: assumed that the DOM lock covers the whole data on the MDT. */
337 lock_res_and_lock(dlmlock);
338 dlmlock->l_ast_data = NULL;
339 cl_object_attr_lock(obj);
341 cl_object_attr_update(env, obj, attr, CAT_KMS);
342 cl_object_attr_unlock(obj);
343 unlock_res_and_lock(dlmlock);
344 cl_object_put(env, obj);
349 int mdc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
350 struct ldlm_lock_desc *new, void *data, int reason)
357 case LDLM_CB_BLOCKING: {
358 struct lustre_handle lockh;
360 ldlm_lock2handle(dlmlock, &lockh);
361 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
366 case LDLM_CB_CANCELING: {
371 * This can be called in the context of outer IO, e.g.,
373 * osc_enqueue_base()->...
374 * ->ldlm_prep_elc_req()->...
375 * ->ldlm_cancel_callback()->...
376 * ->osc_ldlm_blocking_ast()
378 * new environment has to be created to not corrupt outer
381 env = cl_env_get(&refcheck);
387 rc = mdc_dlm_canceling(env, dlmlock);
388 cl_env_put(env, &refcheck);
398 * Updates object attributes from a lock value block (lvb) received together
399 * with the DLM lock reply from the server.
400 * This can be optimized to not update attributes when the lock is a result of a local match.
403 * Called under lock and resource spin-locks.
405 void mdc_lock_lvb_update(const struct lu_env *env, struct osc_object *osc,
406 struct ldlm_lock *dlmlock, struct ost_lvb *lvb)
408 struct cl_object *obj = osc2cl(osc);
409 struct lov_oinfo *oinfo = osc->oo_oinfo;
410 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
411 enum cl_attr_valid valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME |
412 CAT_MTIME | CAT_SIZE;
413 unsigned int setkms = 0;
418 LASSERT(dlmlock != NULL);
419 /* l_ost_lvb is only in the LDLM_IBITS union */
420 LASSERT(dlmlock->l_resource->lr_type == LDLM_IBITS);
421 lvb = &dlmlock->l_ost_lvb;
423 cl_lvb2attr(attr, lvb);
425 cl_object_attr_lock(obj);
426 if (dlmlock != NULL) {
429 check_res_locked(dlmlock->l_resource);
430 size = lvb->lvb_size;
432 if (size >= oinfo->loi_kms) {
434 attr->cat_kms = size;
437 ldlm_lock_allow_match_locked(dlmlock);
440 /* The size should not be less than the kms */
441 if (attr->cat_size < oinfo->loi_kms)
442 attr->cat_size = oinfo->loi_kms;
444 LDLM_DEBUG(dlmlock, "acquired size %llu, setting rss=%llu;%s "
445 "kms=%llu, end=%llu", lvb->lvb_size, attr->cat_size,
446 setkms ? "" : " leaving",
447 setkms ? attr->cat_kms : oinfo->loi_kms,
448 dlmlock ? dlmlock->l_policy_data.l_extent.end : -1ull);
450 cl_object_attr_update(env, obj, attr, valid);
451 cl_object_attr_unlock(obj);
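/* Bind a granted DLM lock to the osc_lock slice: keep a long-term reference on
 * it, widen the cl_lock descriptor to cover the whole object and cache the LVB
 * attributes unless the lock was matched with a cached LVB already.
 */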
455 static void mdc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
456 struct lustre_handle *lockh)
458 struct osc_object *osc = cl2osc(oscl->ols_cl.cls_obj);
459 struct ldlm_lock *dlmlock;
463 dlmlock = ldlm_handle2lock_long(lockh, 0);
464 LASSERT(dlmlock != NULL);
466 /* lock reference taken by ldlm_handle2lock_long() is
467 * owned by osc_lock and released in osc_lock_detach()
469 oscl->ols_has_ref = 1;
471 LASSERT(oscl->ols_dlmlock == NULL);
472 oscl->ols_dlmlock = dlmlock;
474 /* This may be a matched lock for a glimpse request; do not hold a
475  * lock reference in that case. */
476 if (!oscl->ols_glimpse) {
477 /* hold a reference for a non-glimpse lock, which will
478  * be released in osc_lock_cancel() */
479 lustre_handle_copy(&oscl->ols_handle, lockh);
480 ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
484 /* Lock must have been granted. */
485 lock_res_and_lock(dlmlock);
486 if (ldlm_is_granted(dlmlock)) {
487 struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
489 /* extend the lock extent, otherwise there will be a problem when
490  * we decide whether to grant a lockless lock. */
491 descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
492 descr->cld_start = 0;
493 descr->cld_end = CL_PAGE_EOF;
495 /* no lvb update for matched lock */
496 if (!(dlmlock->l_flags & LDLM_FL_LVB_CACHED)) {
497 LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
498 LASSERT(osc == dlmlock->l_ast_data);
499 mdc_lock_lvb_update(env, osc, dlmlock, NULL);
500 dlmlock->l_flags |= LDLM_FL_LVB_CACHED;
503 unlock_res_and_lock(dlmlock);
505 LASSERT(oscl->ols_state != OLS_GRANTED);
506 oscl->ols_state = OLS_GRANTED;
511 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
512 * received from a server, or after mdc_enqueue_send() matched a local DLM
515 static int mdc_lock_upcall(void *cookie, struct lustre_handle *lockh,
518 struct osc_lock *oscl = cookie;
519 struct cl_lock_slice *slice = &oscl->ols_cl;
525 env = cl_env_percpu_get();
526 /* should never happen, similar to osc_ldlm_blocking_ast(). */
527 LASSERT(!IS_ERR(env));
529 rc = ldlm_error2errno(errcode);
530 if (oscl->ols_state == OLS_ENQUEUED) {
531 oscl->ols_state = OLS_UPCALL_RECEIVED;
532 } else if (oscl->ols_state == OLS_CANCELLED) {
535 CERROR("Impossible state: %d\n", oscl->ols_state);
539 CDEBUG(D_INODE, "rc %d, err %d\n", rc, errcode);
541 mdc_lock_granted(env, oscl, lockh);
543 /* Error handling, some errors are tolerable. */
544 if (oscl->ols_glimpse && rc == -ENAVAIL) {
545 LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
546 mdc_lock_lvb_update(env, cl2osc(slice->cls_obj),
547 NULL, &oscl->ols_lvb);
548 /* Hide the error. */
552 if (oscl->ols_owner != NULL)
553 cl_sync_io_note(env, oscl->ols_owner, rc);
554 cl_env_percpu_put(env);
559 /* This is needed only to support old servers (before 2.14) */
560 int mdc_fill_lvb(struct req_capsule *pill, struct ost_lvb *lvb)
562 struct mdt_body *body;
564 /* get LVB data from mdt_body otherwise */
565 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
569 if (!(body->mbo_valid & OBD_MD_DOM_SIZE))
572 mdc_body2lvb(body, lvb);
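/* Common completion path for a DOM enqueue: recover the LVB from the reply of
 * an old server, handle an aborted glimpse, copy the LVB into l_ost_lvb of a
 * granted IBITS lock, invoke the upcall and drop the enqueue reference.
 */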
576 static int mdc_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
577 osc_enqueue_upcall_f upcall, void *cookie,
578 struct lustre_handle *lockh, enum ldlm_mode mode,
579 __u64 *flags, int errcode)
581 struct osc_lock *ols = cookie;
582 bool glimpse = *flags & LDLM_FL_HAS_INTENT;
587 /* needed only for glimpse from an old server (< 2.14) */
588 if (glimpse && !exp_connect_dom_lvb(exp) && errcode >= 0)
589 rc = mdc_fill_lvb(&req->rq_pill, &ols->ols_lvb);
591 if (glimpse && errcode == ELDLM_LOCK_ABORTED) {
592 struct ldlm_reply *rep;
594 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
596 rep->lock_policy_res2 =
597 ptlrpc_status_ntoh(rep->lock_policy_res2);
598 if (rep->lock_policy_res2)
599 errcode = rep->lock_policy_res2;
603 *flags |= LDLM_FL_LVB_READY;
604 } else if (errcode == ELDLM_OK) {
605 struct ldlm_lock *lock;
607 /* Callers have references, so it should always be valid */
608 lock = ldlm_handle2lock(lockh);
610 /* At this point ols_lvb must be filled with the correct LVB either
611  * by mdc_fill_lvb() above or by ldlm_cli_enqueue_fini().
612  * DoM uses l_ost_lvb to store LVB data (only available with
613  * LDLM_IBITS locks), so copy it here from the just-updated ols_lvb.
615 LASSERT(lock->l_resource->lr_type == LDLM_IBITS);
616 lock_res_and_lock(lock);
617 memcpy(&lock->l_ost_lvb, &ols->ols_lvb,
618 sizeof(lock->l_ost_lvb));
619 unlock_res_and_lock(lock);
621 *flags |= LDLM_FL_LVB_READY;
624 /* Call the update callback. */
625 rc = (*upcall)(cookie, lockh, rc < 0 ? rc : errcode);
627 /* release the reference taken in ldlm_cli_enqueue() */
628 if (errcode == ELDLM_LOCK_MATCHED)
630 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
631 ldlm_lock_decref(lockh, mode);
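/* Request interpret callback for an asynchronous enqueue: finish the LDLM part
 * with ldlm_cli_enqueue_fini() and then the MDC part with mdc_enqueue_fini().
 */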
636 static int mdc_enqueue_interpret(const struct lu_env *env,
637 struct ptlrpc_request *req,
640 struct osc_enqueue_args *aa = args;
641 struct ldlm_lock *lock;
642 struct lustre_handle *lockh = &aa->oa_lockh;
643 enum ldlm_mode mode = aa->oa_mode;
644 struct ldlm_enqueue_info einfo = {
645 .ei_type = aa->oa_type,
651 LASSERT(!aa->oa_speculative);
653 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
655 lock = ldlm_handle2lock(lockh);
656 LASSERTF(lock != NULL,
657 "lockh %#llx, req %px, aa %px - client evicted?\n",
658 lockh->cookie, req, aa);
660 /* Take an additional reference so that a blocking AST that
661  * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
662 * to arrive after an upcall has been executed by
663 * mdc_enqueue_fini().
665 ldlm_lock_addref(lockh, mode);
667 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
668 CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
670 /* Let the CP AST grant the lock first. */
671 CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
673 /* Complete the lock-obtaining procedure. */
674 rc = ldlm_cli_enqueue_fini(aa->oa_exp, &req->rq_pill, &einfo, 1,
675 aa->oa_flags, aa->oa_lvb, aa->oa_lvb ?
676 sizeof(*aa->oa_lvb) : 0, lockh, rc, true);
677 /* Complete mdc stuff. */
678 rc = mdc_enqueue_fini(aa->oa_exp, req, aa->oa_upcall, aa->oa_cookie,
679 lockh, mode, aa->oa_flags, rc);
681 CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
683 ldlm_lock_decref(lockh, mode);
688 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a lock
689  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
690  * other synchronous requests, however keeping some locks and trying to obtain
691  * others may take a considerable amount of time in case of OST failure; and
692  * when other sync requests do not get the released lock from a client, the
693  * client is excluded from the cluster -- such scenarios make life difficult, so
694  * release locks just after they are obtained. */
695 static int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
696 struct ldlm_res_id *res_id, __u64 *flags,
697 union ldlm_policy_data *policy, struct ost_lvb *lvb,
698 osc_enqueue_upcall_f upcall, void *cookie,
699 struct ldlm_enqueue_info *einfo, int async)
701 struct obd_device *obd = exp->exp_obd;
702 struct lustre_handle lockh = { 0 };
703 struct ptlrpc_request *req = NULL;
704 struct ldlm_intent *lit;
706 bool glimpse = *flags & LDLM_FL_HAS_INTENT;
707 __u64 search_flags = *flags;
708 __u64 match_flags = 0;
712 bool compat_glimpse = glimpse && !exp_connect_dom_lvb(exp);
716 mode = einfo->ei_mode;
717 if (einfo->ei_mode == LCK_PR)
720 search_flags |= LDLM_FL_LVB_READY;
722 search_flags |= LDLM_FL_BLOCK_GRANTED;
723 if (mode == LCK_GROUP)
724 match_flags = LDLM_MATCH_GROUP;
725 mode = ldlm_lock_match_with_skip(obd->obd_namespace, search_flags, 0,
726 res_id, einfo->ei_type, policy, mode,
727 match_flags, &lockh);
729 struct ldlm_lock *matched;
731 if (*flags & LDLM_FL_TEST_LOCK)
734 matched = ldlm_handle2lock(&lockh);
736 if (CFS_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
737 matched->l_flags |= LDLM_FL_KMS_IGNORE;
739 if (mdc_set_dom_lock_data(matched, einfo->ei_cbdata)) {
740 *flags |= LDLM_FL_LVB_READY;
742 /* We already have a lock, and it's referenced. */
743 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
745 ldlm_lock_decref(&lockh, mode);
746 ldlm_lock_put(matched);
749 ldlm_lock_decref(&lockh, mode);
750 ldlm_lock_put(matched);
753 if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
756 /* Glimpse uses an intent on old servers */
757 req = ptlrpc_request_alloc(class_exp2cliimp(exp), compat_glimpse ?
758 &RQF_LDLM_INTENT : &RQF_LDLM_ENQUEUE);
762 /* For a WRITE lock, cancel other locks on the resource early, if any */
763 if (einfo->ei_mode & LCK_PW)
764 count = mdc_resource_cancel_unused_res(exp, res_id, &cancels,
770 rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
772 ptlrpc_request_free(req);
776 if (compat_glimpse) {
777 /* pack the glimpse intent */
778 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
779 lit->opc = IT_GLIMPSE;
782 /* users of mdc_enqueue() can pass this flag for ldlm_lock_match() */
783 *flags &= ~LDLM_FL_BLOCK_GRANTED;
785 if (compat_glimpse) {
786 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
787 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
790 lvb_size = sizeof(*lvb);
791 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
794 ptlrpc_request_set_replen(req);
796 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
797 lvb_size, LVB_T_OST, &lockh, async);
800 struct osc_enqueue_args *aa;
802 aa = ptlrpc_req_async_args(aa, req);
804 aa->oa_mode = einfo->ei_mode;
805 aa->oa_type = einfo->ei_type;
806 lustre_handle_copy(&aa->oa_lockh, &lockh);
807 aa->oa_upcall = upcall;
808 aa->oa_cookie = cookie;
809 aa->oa_speculative = false;
810 aa->oa_flags = flags;
811 aa->oa_lvb = compat_glimpse ? NULL : lvb;
813 req->rq_interpret_reply = mdc_enqueue_interpret;
814 ptlrpcd_add_req(req);
821 rc = mdc_enqueue_fini(exp, req, upcall, cookie, &lockh, einfo->ei_mode,
828 * Implementation of the cl_lock_operations::clo_enqueue() method for the MDC
829 * layer. This initiates an ldlm enqueue:
831 * - cancels conflicting locks early (osc_lock_enqueue_wait());
833 * - calls mdc_enqueue_send() to do the actual enqueue.
835 * mdc_enqueue_send() is supplied with an upcall function that is executed
836 * when the lock is received, either after a locally cached ldlm lock is matched
837 * or when a reply from the server is received.
839 * This function does not wait for the network communication to complete.
841 static int mdc_lock_enqueue(const struct lu_env *env,
842 const struct cl_lock_slice *slice,
843 struct cl_io *unused, struct cl_sync_io *anchor)
845 struct osc_thread_info *info = osc_env_info(env);
846 struct osc_io *oio = osc_env_io(env);
847 struct osc_object *osc = cl2osc(slice->cls_obj);
848 struct osc_lock *oscl = cl2osc_lock(slice);
849 struct cl_lock *lock = slice->cls_lock;
850 struct ldlm_res_id *resname = &info->oti_resname;
851 union ldlm_policy_data *policy = &info->oti_policy;
852 osc_enqueue_upcall_f upcall = mdc_lock_upcall;
853 void *cookie = (void *)oscl;
859 LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
860 "lock = %px, ols = %px\n", lock, oscl);
862 if (oscl->ols_state == OLS_GRANTED)
865 /* Lockahead is not supported on MDT yet */
866 if (oscl->ols_flags & LDLM_FL_NO_EXPANSION) {
867 result = -EOPNOTSUPP;
871 if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
872 GOTO(enqueue_base, 0);
874 if (oscl->ols_glimpse) {
875 LASSERT(equi(oscl->ols_speculative, anchor == NULL));
877 GOTO(enqueue_base, 0);
880 result = osc_lock_enqueue_wait(env, osc, oscl);
884 /* we can grant lockless lock right after all conflicting locks
886 if (osc_lock_is_lockless(oscl)) {
887 oscl->ols_state = OLS_GRANTED;
888 oio->oi_lockless = 1;
893 oscl->ols_state = OLS_ENQUEUED;
894 if (anchor != NULL) {
895 atomic_inc(&anchor->csi_sync_nr);
896 oscl->ols_owner = anchor;
900 * The DLM lock's AST data must be the osc_object;
901 * the DLM enqueue callback is set to mdc_lock_upcall() with the cookie as
904 fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
905 mdc_lock_build_policy(env, lock, policy);
906 LASSERT(!oscl->ols_speculative);
907 result = mdc_enqueue_send(env, osc_export(osc), resname,
908 &oscl->ols_flags, policy, &oscl->ols_lvb,
909 upcall, cookie, &oscl->ols_einfo, async);
911 if (osc_lock_is_lockless(oscl)) {
912 oio->oi_lockless = 1;
914 LASSERT(oscl->ols_state == OLS_GRANTED);
915 LASSERT(oscl->ols_hold);
916 LASSERT(oscl->ols_dlmlock != NULL);
921 oscl->ols_state = OLS_CANCELLED;
922 osc_lock_wake_waiters(env, osc, oscl);
925 cl_sync_io_note(env, anchor, result);
930 static const struct cl_lock_operations mdc_lock_lockless_ops = {
931 .clo_fini = osc_lock_fini,
932 .clo_enqueue = mdc_lock_enqueue,
933 .clo_cancel = mdc_lock_lockless_cancel,
934 .clo_print = osc_lock_print
937 static const struct cl_lock_operations mdc_lock_ops = {
938 .clo_fini = osc_lock_fini,
939 .clo_enqueue = mdc_lock_enqueue,
940 .clo_cancel = osc_lock_cancel,
941 .clo_print = osc_lock_print,
944 static int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
945 struct cl_lock *lock, const struct cl_io *io)
947 struct osc_lock *ols;
948 __u32 enqflags = lock->cll_descr.cld_enq_flags;
949 __u64 flags = osc_enq2ldlm_flags(enqflags);
953 /* Ignore AGL for Data-on-MDT, stat returns size data */
954 if ((enqflags & CEF_SPECULATIVE) != 0)
957 OBD_SLAB_ALLOC_PTR_GFP(ols, osc_lock_kmem, GFP_NOFS);
958 if (unlikely(ols == NULL))
961 ols->ols_state = OLS_NEW;
962 spin_lock_init(&ols->ols_lock);
963 INIT_LIST_HEAD(&ols->ols_waiting_list);
964 INIT_LIST_HEAD(&ols->ols_wait_entry);
965 INIT_LIST_HEAD(&ols->ols_nextlock_oscobj);
966 ols->ols_lockless_ops = &mdc_lock_lockless_ops;
968 ols->ols_flags = flags;
969 ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
971 if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
972 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
973 ols->ols_glimpse = 1;
975 mdc_lock_build_einfo(env, lock, cl2osc(obj), &ols->ols_einfo);
977 cl_lock_slice_add(lock, &ols->ols_cl, obj, &mdc_lock_ops);
979 if (!(enqflags & CEF_MUST))
980 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
982 if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
983 osc_lock_set_writer(env, io, obj, ols);
984 else if (io->ci_type == CIT_READ ||
985 (io->ci_type == CIT_FAULT && !io->u.ci_fault.ft_mkwrite))
986 osc_lock_set_reader(env, io, obj, ols);
988 LDLM_DEBUG_NOLOCK("lock %p, mdc lock %p, flags %llx",
989 lock, ols, ols->ols_flags);
996 * An implementation of cl_io_operations specific methods for MDC layer.
999 static int mdc_async_upcall(void *a, int rc)
1001 struct osc_async_cbargs *args = a;
1004 complete(&args->opc_sync);
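/* Find the DOM lock protecting the page at @index and return its remote handle
 * so the server can locate the lock for a server-side operation.
 */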
1008 static int mdc_get_lock_handle(const struct lu_env *env, struct osc_object *osc,
1009 pgoff_t index, struct lustre_handle *lh)
1011 struct ldlm_lock *lock;
1013 /* find DOM lock protecting object */
1014 lock = mdc_dlmlock_at_pgoff(env, osc, index,
1015 OSC_DAP_FL_TEST_LOCK |
1016 OSC_DAP_FL_CANCELING);
1018 struct ldlm_resource *res;
1019 struct ldlm_res_id *resname;
1021 resname = &osc_env_info(env)->oti_resname;
1022 fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
1023 res = ldlm_resource_get(osc_export(osc)->
1024 exp_obd->obd_namespace,
1025 resname, LDLM_IBITS, 0);
1027 CERROR("No lock resource for "DFID"\n",
1028 PFID(lu_object_fid(osc2lu(osc))));
1030 ldlm_resource_dump(D_ERROR, res);
1034 *lh = lock->l_remote_handle;
1035 ldlm_lock_put(lock);
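/* CIT_SETATTR start for a DOM object: truncate or punch cached pages first,
 * update the cached attributes, then send the punch/fallocate RPC to the MDT
 * for size-changing operations.
 */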
1040 static int mdc_io_setattr_start(const struct lu_env *env,
1041 const struct cl_io_slice *slice)
1043 struct cl_io *io = slice->cis_io;
1044 struct osc_io *oio = cl2osc_io(env, slice);
1045 struct cl_object *obj = slice->cis_obj;
1046 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
1047 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1048 struct obdo *oa = &oio->oi_oa;
1049 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1050 __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
1051 unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
1052 enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
1055 /* silently ignore non-truncate setattr for Data-on-MDT object */
1056 if (cl_io_is_trunc(io)) {
1057 /* truncate cache dirty pages first */
1058 rc = osc_cache_truncate_start(env, cl2osc(obj), size,
1060 } else if (cl_io_is_fallocate(io) &&
1061 (io->u.ci_setattr.sa_falloc_mode &
1062 (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))) {
1063 rc = osc_punch_start(env, io, obj);
1068 if (oio->oi_lockless == 0) {
1069 cl_object_attr_lock(obj);
1070 rc = cl_object_attr_get(env, obj, attr);
1072 struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
1073 enum cl_attr_valid cl_valid = 0;
1075 if (ia_avalid & ATTR_SIZE) {
1076 attr->cat_size = size;
1077 attr->cat_kms = size;
1078 cl_valid = (CAT_SIZE | CAT_KMS);
1080 if (ia_avalid & ATTR_MTIME_SET) {
1081 attr->cat_mtime = lvb->lvb_mtime;
1082 cl_valid |= CAT_MTIME;
1084 if (ia_avalid & ATTR_ATIME_SET) {
1085 attr->cat_atime = lvb->lvb_atime;
1086 cl_valid |= CAT_ATIME;
1088 if (ia_xvalid & OP_XVALID_CTIME_SET) {
1089 attr->cat_ctime = lvb->lvb_ctime;
1090 cl_valid |= CAT_CTIME;
1092 rc = cl_object_attr_update(env, obj, attr, cl_valid);
1094 cl_object_attr_unlock(obj);
1099 if (!(ia_avalid & ATTR_SIZE) && !cl_io_is_fallocate(io))
1102 memset(oa, 0, sizeof(*oa));
1103 oa->o_oi = loi->loi_oi;
1104 oa->o_mtime = attr->cat_mtime;
1105 oa->o_atime = attr->cat_atime;
1106 oa->o_ctime = attr->cat_ctime;
1107 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
1108 OBD_MD_FLCTIME | OBD_MD_FLMTIME | OBD_MD_FLSIZE |
1111 if (oio->oi_lockless) {
1112 oa->o_flags = OBD_FL_SRVLOCK;
1113 oa->o_valid |= OBD_MD_FLFLAGS;
1115 rc = mdc_get_lock_handle(env, cl2osc(obj), CL_PAGE_EOF,
1118 oa->o_valid |= OBD_MD_FLHANDLE;
1121 init_completion(&cbargs->opc_sync);
1122 if (cl_io_is_fallocate(io)) {
1123 int falloc_mode = io->u.ci_setattr.sa_falloc_mode;
1125 oa->o_size = io->u.ci_setattr.sa_falloc_offset;
1126 oa->o_blocks = io->u.ci_setattr.sa_falloc_end;
1127 rc = osc_fallocate_base(osc_export(cl2osc(obj)), oa,
1128 mdc_async_upcall, cbargs, falloc_mode);
1131 oa->o_blocks = OBD_OBJECT_EOF;
1132 rc = osc_punch_send(osc_export(cl2osc(obj)), oa,
1133 mdc_async_upcall, cbargs);
1135 cbargs->opc_rpc_sent = rc == 0;
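/* Derive read-ahead limits from the DOM lock covering @start; the whole file
 * is readable under one DOM lock, so read-ahead may extend to CL_PAGE_EOF.
 */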
1139 static int mdc_io_read_ahead(const struct lu_env *env,
1140 const struct cl_io_slice *ios,
1141 pgoff_t start, struct cl_read_ahead *ra)
1143 struct osc_object *osc = cl2osc(ios->cis_obj);
1144 struct osc_io *oio = cl2osc_io(env, ios);
1145 struct ldlm_lock *dlmlock;
1149 dlmlock = mdc_dlmlock_at_pgoff(env, osc, start, 0);
1150 if (dlmlock == NULL)
1153 oio->oi_is_readahead = 1;
1154 if (dlmlock->l_req_mode != LCK_PR) {
1155 struct lustre_handle lockh;
1157 ldlm_lock2handle(dlmlock, &lockh);
1158 ldlm_lock_addref(&lockh, LCK_PR);
1159 ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
1162 ra->cra_rpc_pages = osc_cli(osc)->cl_max_pages_per_rpc;
1163 ra->cra_end_idx = CL_PAGE_EOF;
1164 ra->cra_release = osc_read_ahead_release;
1165 ra->cra_dlmlock = dlmlock;
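/* CIT_FSYNC start for a DOM object: write back (or discard) the cached pages
 * and, for CL_FSYNC_ALL or CL_FSYNC_RECLAIM, start the OST-style sync RPC.
 */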
1171 static int mdc_io_fsync_start(const struct lu_env *env,
1172 const struct cl_io_slice *slice)
1174 struct cl_io *io = slice->cis_io;
1175 struct cl_fsync_io *fio = &io->u.ci_fsync;
1176 struct cl_object *obj = slice->cis_obj;
1177 struct osc_object *osc = cl2osc(obj);
1182 if (fio->fi_mode == CL_FSYNC_RECLAIM) {
1183 struct client_obd *cli = osc_cli(osc);
1185 if (!atomic_long_read(&cli->cl_unstable_count)) {
1186 /* Stop flush when there are no unstable pages? */
1187 CDEBUG(D_CACHE, "unstable count is zero\n");
1192 /* an MDC lock always covers the whole object, so do the sync for the whole
1193  * possible range regardless of the supplied start/end values.
1195 result = osc_cache_writeback_range(env, osc, 0, CL_PAGE_EOF, 0,
1196 fio->fi_mode == CL_FSYNC_DISCARD);
1198 fio->fi_nr_written += result;
1201 if (fio->fi_mode == CL_FSYNC_ALL || fio->fi_mode == CL_FSYNC_RECLAIM) {
1202 struct osc_io *oio = cl2osc_io(env, slice);
1203 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1206 if (fio->fi_mode == CL_FSYNC_ALL) {
1207 rc = osc_cache_wait_range(env, osc, 0, CL_PAGE_EOF);
1211 /* Use the OSC sync code because it is asynchronous.
1212  * It is to be added into MDC to avoid using
1213  * OST_SYNC at both MDC and MDT.
1215 rc = osc_fsync_ost(env, osc, fio);
1217 cbargs->opc_rpc_sent = 1;
1225 struct mdc_data_version_args {
1226 struct osc_io *dva_oio;
1230 mdc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
1233 struct mdc_data_version_args *dva = args;
1234 struct osc_io *oio = dva->dva_oio;
1235 const struct mdt_body *body;
1241 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1243 GOTO(out, rc = -EPROTO);
1245 /* Prepare OBDO from mdt_body for CLIO */
1246 oio->oi_oa.o_valid = body->mbo_valid;
1247 oio->oi_oa.o_flags = body->mbo_flags;
1248 oio->oi_oa.o_data_version = body->mbo_version;
1249 oio->oi_oa.o_layout_version = body->mbo_layout_gen;
1252 oio->oi_cbarg.opc_rc = rc;
1253 complete(&oio->oi_cbarg.opc_sync);
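/* CIT_DATA_VERSION start: request the data version from the MDT with an
 * MDS_GETATTR RPC, optionally asking the server to flush or sync data first.
 */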
1257 static int mdc_io_data_version_start(const struct lu_env *env,
1258 const struct cl_io_slice *slice)
1260 struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
1261 struct osc_io *oio = cl2osc_io(env, slice);
1262 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1263 struct osc_object *obj = cl2osc(slice->cis_obj);
1264 struct obd_export *exp = osc_export(obj);
1265 struct ptlrpc_request *req;
1266 struct mdt_body *body;
1267 struct mdc_data_version_args *dva;
1272 memset(&oio->oi_oa, 0, sizeof(oio->oi_oa));
1273 oio->oi_oa.o_oi.oi_fid = *lu_object_fid(osc2lu(obj));
1274 oio->oi_oa.o_valid = OBD_MD_FLID;
1276 init_completion(&cbargs->opc_sync);
1278 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
1282 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
1284 ptlrpc_request_free(req);
1288 body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
1289 body->mbo_fid1 = *lu_object_fid(osc2lu(obj));
1290 body->mbo_valid = OBD_MD_FLID;
1291 /* Indicate that data version is needed */
1292 body->mbo_valid |= OBD_MD_FLDATAVERSION;
1293 body->mbo_flags = 0;
1295 if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
1296 body->mbo_valid |= OBD_MD_FLFLAGS;
1297 body->mbo_flags |= OBD_FL_SRVLOCK;
1298 if (dv->dv_flags & LL_DV_WR_FLUSH)
1299 body->mbo_flags |= OBD_FL_FLUSH;
1302 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
1303 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
1304 req_capsule_set_size(&req->rq_pill, &RMF_FILE_ENCCTX, RCL_SERVER, 0);
1305 ptlrpc_request_set_replen(req);
1307 req->rq_interpret_reply = mdc_data_version_interpret;
1308 dva = ptlrpc_req_async_args(dva, req);
1311 ptlrpcd_add_req(req);
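/* Wait for the data version reply and copy the versions from the obdo into the
 * cl_data_version_io, or propagate the RPC error.
 */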
1316 static void mdc_io_data_version_end(const struct lu_env *env,
1317 const struct cl_io_slice *slice)
1319 struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
1320 struct osc_io *oio = cl2osc_io(env, slice);
1321 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1324 wait_for_completion(&cbargs->opc_sync);
1326 if (cbargs->opc_rc != 0) {
1327 slice->cis_io->ci_result = cbargs->opc_rc;
1329 slice->cis_io->ci_result = 0;
1330 if (!(oio->oi_oa.o_valid &
1331 (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
1332 slice->cis_io->ci_result = -EOPNOTSUPP;
1334 if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION)
1335 dv->dv_layout_version = oio->oi_oa.o_layout_version;
1336 if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)
1337 dv->dv_data_version = oio->oi_oa.o_data_version;
1343 static const struct cl_io_operations mdc_io_ops = {
1346 .cio_iter_init = osc_io_iter_init,
1347 .cio_iter_fini = osc_io_rw_iter_fini,
1348 .cio_start = osc_io_read_start,
1351 .cio_iter_init = osc_io_iter_init,
1352 .cio_iter_fini = osc_io_rw_iter_fini,
1353 .cio_start = osc_io_write_start,
1354 .cio_end = osc_io_end,
1357 .cio_iter_init = osc_io_iter_init,
1358 .cio_iter_fini = osc_io_iter_fini,
1359 .cio_start = mdc_io_setattr_start,
1360 .cio_end = osc_io_setattr_end,
1362 [CIT_DATA_VERSION] = {
1363 .cio_start = mdc_io_data_version_start,
1364 .cio_end = mdc_io_data_version_end,
1367 .cio_iter_init = osc_io_iter_init,
1368 .cio_iter_fini = osc_io_iter_fini,
1369 .cio_start = osc_io_fault_start,
1370 .cio_end = osc_io_end,
1373 .cio_start = mdc_io_fsync_start,
1374 .cio_end = osc_io_fsync_end,
1377 .cio_start = osc_io_lseek_start,
1378 .cio_end = osc_io_lseek_end,
1381 .cio_read_ahead = mdc_io_read_ahead,
1382 .cio_lru_reserve = osc_io_lru_reserve,
1383 .cio_submit = osc_io_submit,
1384 .cio_dio_submit = osc_dio_submit,
1385 .cio_commit_async = osc_io_commit_async,
1386 .cio_extent_release = osc_io_extent_release,
1389 static int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
1392 struct osc_io *oio = osc_env_io(env);
1394 CL_IO_SLICE_CLEAN(oio, oi_cl);
1395 cl_io_slice_add(io, &oio->oi_cl, obj, &mdc_io_ops);
1399 static void mdc_build_res_name(struct osc_object *osc,
1400 struct ldlm_res_id *resname)
1402 fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
1406 * Implementation of struct cl_req_operations::cro_attr_set() for MDC
1407 * layer. MDC is responsible for struct obdo::o_id and struct obdo::o_seq
1410 static void mdc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
1411 struct cl_req_attr *attr)
1413 u64 flags = attr->cra_flags;
1415 /* Copy the object FID into cra_oa */
1416 attr->cra_oa->o_oi.oi_fid = *lu_object_fid(&obj->co_lu);
1418 if (flags & OBD_MD_FLGROUP)
1419 attr->cra_oa->o_valid |= OBD_MD_FLGROUP;
1421 if (flags & OBD_MD_FLID)
1422 attr->cra_oa->o_valid |= OBD_MD_FLID;
1424 if (flags & OBD_MD_FLHANDLE) {
1425 struct osc_page *opg;
1427 opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
1428 if (!opg->ops_srvlock) {
1431 rc = mdc_get_lock_handle(env, cl2osc(obj),
1433 &attr->cra_oa->o_handle);
1435 CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
1436 "uncovered page!\n");
1439 attr->cra_oa->o_valid |= OBD_MD_FLHANDLE;
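/* coo_attr_get(): report an error cached in the LVB blocks field, otherwise
 * fall back to the common OSC attribute handling.
 */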
1445 static int mdc_attr_get(const struct lu_env *env, struct cl_object *obj,
1446 struct cl_attr *attr)
1448 struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
1450 if (OST_LVB_IS_ERR(oinfo->loi_lvb.lvb_blocks))
1451 return OST_LVB_GET_ERR(oinfo->loi_lvb.lvb_blocks);
1453 return osc_attr_get(env, obj, attr);
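/* Resource iterator callback used when pruning an object: detach the
 * osc_object from the lock and refresh l_ost_lvb from the cached oinfo so the
 * lock no longer references the object being destroyed.
 */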
1456 static int mdc_object_ast_clear(struct ldlm_lock *lock, void *data)
1458 struct osc_object *osc = (struct osc_object *)data;
1459 struct ost_lvb *lvb = &lock->l_ost_lvb;
1460 struct lov_oinfo *oinfo;
1463 if (lock->l_ast_data != data)
1464 RETURN(LDLM_ITER_CONTINUE);
1466 lock->l_ast_data = NULL;
1468 LASSERT(osc != NULL);
1469 LASSERT(osc->oo_oinfo != NULL);
1471 /* Update the lvb in the lock from the cached oinfo */
1472 oinfo = osc->oo_oinfo;
1475 "update lock size %llu blocks %llu [cma]time: %llu %llu %llu by oinfo size %llu blocks %llu [cma]time %llu %llu %llu",
1476 lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_ctime,
1477 lvb->lvb_mtime, lvb->lvb_atime, oinfo->loi_lvb.lvb_size,
1478 oinfo->loi_lvb.lvb_blocks, oinfo->loi_lvb.lvb_ctime,
1479 oinfo->loi_lvb.lvb_mtime, oinfo->loi_lvb.lvb_atime);
1480 LASSERT(oinfo->loi_lvb.lvb_size >= oinfo->loi_kms);
1482 cl_object_attr_lock(&osc->oo_cl);
1483 /* l_ost_lvb is only in the LDLM_IBITS union */
1484 LASSERT(lock->l_resource->lr_type == LDLM_IBITS);
1485 memcpy(lvb, &oinfo->loi_lvb, sizeof(oinfo->loi_lvb));
1486 cl_object_attr_unlock(&osc->oo_cl);
1487 lock->l_flags &= ~LDLM_FL_LVB_CACHED;
1489 RETURN(LDLM_ITER_CONTINUE);
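/* coo_prune(): walk all DLM locks on the object's resource and clear their
 * references to the osc_object before it is freed.
 */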
1492 static int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
1494 struct osc_object *osc = cl2osc(obj);
1495 struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
1497 /* DLM locks don't hold a reference on the osc_object, so we have to
1498  * clear it before the object is destroyed. */
1499 osc_build_res_name(osc, resname);
1500 ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
1501 mdc_object_ast_clear, osc);
1505 static int mdc_object_flush(const struct lu_env *env, struct cl_object *obj,
1506 struct ldlm_lock *lock)
1508 /* if lock cancel is initiated from llite then it is a combined
1509  * lock with the DOM bit and it may have no l_ast_data initialized yet,
1510  * so initialize it here with the given osc_object.
1512 mdc_set_dom_lock_data(lock, cl2osc(obj));
1513 RETURN(mdc_dlm_canceling(env, lock));
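/* coo_fiemap() for DOM: send an MDS_GET_INFO fiemap request to the MDT; when
 * FIEMAP_FLAG_SYNC is set, try to use a cached DOM lock instead of asking the
 * server to take one.
 */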
1516 static int mdc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1517 struct ll_fiemap_info_key *fmkey,
1518 struct fiemap *fiemap, size_t *buflen)
1520 struct osc_thread_info *info = osc_env_info(env);
1521 struct osc_object *osc = cl2osc(obj);
1522 struct obd_export *exp = osc_export(osc);
1523 struct lustre_handle lockh;
1524 enum ldlm_mode mode = LCK_MINMODE;
1525 struct ptlrpc_request *req;
1526 struct fiemap *repbuf;
1527 struct ll_fiemap_info_key *rq_fmkey;
1534 fmkey->lfik_oa.o_oi = osc->oo_oinfo->loi_oi;
1536 if (fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC) {
1537 struct ldlm_res_id *resid = &osc_env_info(env)->oti_resname;
1538 union ldlm_policy_data *policy = &info->oti_policy;
1540 mdc_build_res_name(osc, resid);
1541 mdc_lock_build_policy(env, NULL, policy);
1542 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY;
1543 mode = mdc_dom_lock_match(env, exp, resid, LDLM_IBITS, policy,
1544 LCK_PR | LCK_PW | LCK_GROUP,
1545 &flags, osc, 0, &lockh);
1546 fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
1547 if (mode) { /* lock is cached on client */
1548 fmkey->lfik_oa.o_flags &= ~OBD_FL_SRVLOCK;
1549 if (mode != LCK_PR) {
1550 ldlm_lock_addref(&lockh, LCK_PR);
1551 ldlm_lock_decref(&lockh, mode);
1554 /* no cached lock, need to acquire the lock on the server side */
1555 fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
1559 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
1560 &RQF_OST_GET_INFO_FIEMAP);
1562 GOTO(drop_lock, rc = -ENOMEM);
1564 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
1566 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
1568 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
1571 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
1573 ptlrpc_request_free(req);
1574 GOTO(drop_lock, rc);
1576 rq_fmkey = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
1578 fmbuf = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
1579 memcpy(fmbuf, fiemap, *buflen);
1580 ptlrpc_request_set_replen(req);
1582 rc = ptlrpc_queue_wait(req);
1586 repbuf = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
1588 GOTO(fini_req, rc = -EPROTO);
1589 memcpy(fiemap, repbuf, *buflen);
1592 ptlrpc_req_put(req);
1595 ldlm_lock_decref(&lockh, LCK_PR);
1599 static const struct cl_object_operations mdc_ops = {
1600 .coo_page_init = osc_page_init,
1601 .coo_lock_init = mdc_lock_init,
1602 .coo_io_init = mdc_io_init,
1603 .coo_attr_get = mdc_attr_get,
1604 .coo_attr_update = osc_attr_update,
1605 .coo_glimpse = osc_object_glimpse,
1606 .coo_req_attr_set = mdc_req_attr_set,
1607 .coo_prune = mdc_object_prune,
1608 .coo_object_flush = mdc_object_flush,
1609 .coo_fiemap = mdc_object_fiemap,
1612 static const struct osc_object_operations mdc_object_ops = {
1613 .oto_build_res_name = mdc_build_res_name,
1614 .oto_dlmlock_at_pgoff = mdc_dlmlock_at_pgoff,
1617 static int mdc_object_init(const struct lu_env *env, struct lu_object *obj,
1618 const struct lu_object_conf *conf)
1620 struct osc_object *osc = lu2osc(obj);
1622 if (osc->oo_initialized)
1625 osc->oo_initialized = true;
1627 return osc_object_init(env, obj, conf);
1630 static void mdc_object_free(const struct lu_env *env, struct lu_object *obj)
1632 osc_object_free(env, obj);
1635 static const struct lu_object_operations mdc_lu_obj_ops = {
1636 .loo_object_init = mdc_object_init,
1637 .loo_object_delete = NULL,
1638 .loo_object_release = NULL,
1639 .loo_object_free = mdc_object_free,
1640 .loo_object_print = osc_object_print,
1641 .loo_object_invariant = NULL
1644 static struct lu_object *mdc_object_alloc(const struct lu_env *env,
1645 const struct lu_object_header *unused,
1646 struct lu_device *dev)
1648 struct osc_object *osc;
1649 struct lu_object *obj;
1651 OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, GFP_NOFS);
1654 lu_object_init(obj, NULL, dev);
1655 osc->oo_cl.co_ops = &mdc_ops;
1656 obj->lo_ops = &mdc_lu_obj_ops;
1657 osc->oo_obj_ops = &mdc_object_ops;
1658 osc->oo_initialized = false;
1665 static int mdc_process_config(const struct lu_env *env, struct lu_device *d,
1666 struct lustre_cfg *cfg)
1668 size_t count = class_modify_config(cfg, PARAM_MDC,
1669 &d->ld_obd->obd_kset.kobj);
1670 return count > 0 ? 0 : count;
1673 static const struct lu_device_operations mdc_lu_ops = {
1674 .ldo_object_alloc = mdc_object_alloc,
1675 .ldo_process_config = mdc_process_config,
1676 .ldo_recovery_complete = NULL,
1679 static struct lu_device *mdc_device_free(const struct lu_env *env,
1680 struct lu_device *lu)
1682 struct obd_device *obd = lu->ld_obd;
1683 struct client_obd *cli = &obd->u.cli;
1684 struct osc_device *osc = lu2osc_dev(lu);
1686 LASSERT(cli->cl_mod_rpcs_in_flight == 0);
1687 cl_device_fini(lu2cl_dev(lu));
1688 osc_cleanup_common(obd);
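/* Allocate and set up the MDC cl_device: initialize the cl_device, wire up the
 * lu_device operations and run the common mdc_setup() for the obd device.
 */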
1694 static struct lu_device *mdc_device_alloc(const struct lu_env *env,
1695 struct lu_device_type *t,
1696 struct lustre_cfg *cfg)
1698 struct lu_device *d;
1699 struct osc_device *osc;
1700 struct obd_device *obd;
1705 RETURN(ERR_PTR(-ENOMEM));
1707 cl_device_init(&osc->osc_cl, t);
1708 d = osc2lu_dev(osc);
1709 d->ld_ops = &mdc_lu_ops;
1712 obd = class_name2obd(lustre_cfg_string(cfg, 0));
1714 RETURN(ERR_PTR(-ENODEV));
1716 rc = mdc_setup(obd, cfg);
1718 mdc_device_free(env, d);
1719 RETURN(ERR_PTR(rc));
1721 osc->osc_exp = obd->obd_self_export;
1722 osc->osc_stats.os_init = ktime_get_real();
1726 static int mdc_device_init(const struct lu_env *env, struct lu_device *d,
1727 const char *name, struct lu_device *next)
1732 static struct lu_device *mdc_device_fini(const struct lu_env *env,
1733 struct lu_device *lu)
1735 struct obd_device *obd = lu->ld_obd;
1739 osc_precleanup_common(obd);
1740 mdc_changelog_cdev_finish(obd);
1741 mdc_llog_finish(obd);
1742 lprocfs_free_md_stats(obd);
1743 ptlrpc_lprocfs_unregister_obd(obd);
1748 static const struct lu_device_type_operations mdc_device_type_ops = {
1749 .ldto_device_alloc = mdc_device_alloc,
1750 .ldto_device_free = mdc_device_free,
1751 .ldto_device_init = mdc_device_init,
1752 .ldto_device_fini = mdc_device_fini
1755 struct lu_device_type mdc_device_type = {
1756 .ldt_tags = LU_DEVICE_CL,
1757 .ldt_name = LUSTRE_MDC_NAME,
1758 .ldt_ops = &mdc_device_type_ops,
1759 .ldt_ctx_tags = LCT_CL_THREAD