/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_device, cl_req for MDC layer.
 *
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */

#define DEBUG_SUBSYSTEM S_MDC

#include <obd_class.h>
#include <lustre_osc.h>
#include <linux/falloc.h>
#include <uapi/linux/lustre/lustre_param.h>

#include "mdc_internal.h"

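/* Build the LDLM inodebits policy used by DoM locks: only the DOM bit is
 * set, and the group ID is taken from the cl_lock descriptor when one is
 * supplied (NULL is allowed for plain lock matching).
 */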
static void mdc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
                                  union ldlm_policy_data *policy)
{
        memset(policy, 0, sizeof(*policy));
        policy->l_inodebits.bits = MDS_INODELOCK_DOM;
        if (lock != NULL)
                policy->l_inodebits.li_gid = lock->cll_descr.cld_gid;
}

int mdc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
        return osc_ldlm_glimpse_ast(dlmlock, data);
}

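/* Fill the ldlm_enqueue_info used for a DoM IBITS enqueue: lock type, mode
 * and the blocking/completion/glimpse callbacks for this MDC lock.
 */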
static void mdc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *lock,
                                 struct osc_object *osc,
                                 struct ldlm_enqueue_info *einfo)
{
        einfo->ei_type = LDLM_IBITS;
        einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
        einfo->ei_cb_bl = mdc_ldlm_blocking_ast;
        einfo->ei_cb_cp = ldlm_completion_ast;
        einfo->ei_cb_gl = mdc_ldlm_glimpse_ast;
        einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
        einfo->ei_req_slot = 1;
}

static void mdc_lock_lvb_update(const struct lu_env *env,
                                struct osc_object *osc,
                                struct ldlm_lock *dlmlock,
                                struct ost_lvb *lvb);

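/* Attach the osc_object to the DLM lock's l_ast_data if it is not set yet.
 * Returns non-zero when the lock references this object, zero otherwise.
 */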
static int mdc_set_dom_lock_data(struct ldlm_lock *lock, void *data)
{
        int set = 0;

        LASSERT(lock != NULL);
        LASSERT(lock->l_glimpse_ast == mdc_ldlm_glimpse_ast);

        lock_res_and_lock(lock);

        if (lock->l_ast_data == NULL)
                lock->l_ast_data = data;
        if (lock->l_ast_data == data)
                set = 1;

        unlock_res_and_lock(lock);

        return set;
}

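/* Match an existing DoM lock on the client. On success the found lock is
 * bound to the osc_object and its cached LVB is applied to the object
 * attributes if that has not been done yet.
 */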
int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
                       struct ldlm_res_id *res_id, enum ldlm_type type,
                       union ldlm_policy_data *policy, enum ldlm_mode mode,
                       __u64 *flags, struct osc_object *obj,
                       struct lustre_handle *lockh,
                       enum ldlm_match_flags match_flags)
{
        struct obd_device *obd = exp->exp_obd;
        __u64 lflags = *flags;
        enum ldlm_mode rc;

        rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
                                       res_id, type, policy, mode, lockh,
                                       match_flags);
        if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
                return rc;

        if (obj != NULL) {
                struct ldlm_lock *lock = ldlm_handle2lock(lockh);

                LASSERT(lock != NULL);
                if (mdc_set_dom_lock_data(lock, obj)) {
                        lock_res_and_lock(lock);
                        if (!ldlm_is_lvb_cached(lock)) {
                                LASSERT(lock->l_ast_data == obj);
                                mdc_lock_lvb_update(env, obj, lock, NULL);
                                ldlm_set_lvb_cached(lock);
                        }
                        unlock_res_and_lock(lock);
                } else {
                        ldlm_lock_decref(lockh, rc);
                        rc = 0;
                }
                LDLM_LOCK_PUT(lock);
        }
        return rc;
}

/**
 * Finds an existing lock covering a page with given index.
 * Copy of osc_obj_dlmlock_at_pgoff() but for DoM IBITS lock.
 */
struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
                                       struct osc_object *obj, pgoff_t index,
                                       enum osc_dap_flags dap_flags)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct ldlm_res_id *resname = &info->oti_resname;
        union ldlm_policy_data *policy = &info->oti_policy;
        struct lustre_handle lockh;
        struct ldlm_lock *lock = NULL;
        enum ldlm_mode mode;
        __u64 flags;
        enum ldlm_match_flags match_flags = 0;

        fid_build_reg_res_name(lu_object_fid(osc2lu(obj)), resname);
        mdc_lock_build_policy(env, NULL, policy);
        policy->l_inodebits.li_gid = LDLM_GID_ANY;

        flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
        if (dap_flags & OSC_DAP_FL_TEST_LOCK)
                flags |= LDLM_FL_TEST_LOCK;

        if (dap_flags & OSC_DAP_FL_AST)
                match_flags |= LDLM_MATCH_AST;

        if (dap_flags & OSC_DAP_FL_CANCELING)
                match_flags |= LDLM_MATCH_UNREF;

again:
        /* Next, search for already existing extent locks that will cover us */
        /* If we're trying to read, we also search for an existing PW lock. The
         * VFS and page cache already protect us locally, so lots of readers/
         * writers can share a single PW lock.
         */
        mode = mdc_dom_lock_match(env, osc_export(obj), resname, LDLM_IBITS,
                                  policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
                                  obj, &lockh, match_flags);
        if (mode != 0) {
                lock = ldlm_handle2lock(&lockh);
                /* RACE: the lock is cancelled so let's try again */
                if (unlikely(lock == NULL))
                        goto again;
        }
        return lock;
}

/**
 * Check if page @page is covered by an extra lock or discard it.
 */
static bool mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
                                     void **pvec, int count, void *cbdata)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct osc_object *osc = cbdata;
        pgoff_t index;
        int i;

        for (i = 0; i < count; i++) {
                struct osc_page *ops = pvec[i];

                index = osc_index(ops);
                if (index >= info->oti_fn_index) {
                        struct ldlm_lock *tmp;
                        struct cl_page *page = ops->ops_cl.cpl_page;

                        /* refresh non-overlapped index */
                        tmp = mdc_dlmlock_at_pgoff(env, osc, index,
                                        OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_AST);
                        if (tmp != NULL) {
                                info->oti_fn_index = CL_PAGE_EOF;
                                LDLM_LOCK_PUT(tmp);
                        } else if (cl_page_own(env, io, page) == 0) {
                                /* discard the page */
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);
                        } else {
                                LASSERT(page->cp_state == CPS_FREEING);
                        }
                }

                info->oti_next_index = index + 1;
        }
        return true;
}

/**
 * Discard pages protected by the given lock. This function traverses radix
 * tree to find all covering pages and discard them. If a page is being covered
 * by other locks, it should remain in cache.
 *
 * If error happens on any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
 */
static int mdc_lock_discard_pages(const struct lu_env *env,
                                  struct osc_object *osc,
                                  pgoff_t start, pgoff_t end,
                                  bool discard)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct cl_io *io = &info->oti_io;
        osc_page_gang_cbt cb;
        int result;

        io->ci_obj = cl_object_top(osc2cl(osc));
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                goto out;

        cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
        info->oti_fn_index = info->oti_next_index = start;

        osc_page_gang_lookup(env, io, osc, info->oti_next_index,
                             end, cb, (void *)osc);
out:
        cl_io_fini(env, io);
        return result;
}

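/* Write back (or discard) and then drop the pages covered by a DoM lock in
 * the [start, end] range before the lock is cancelled.
 */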
static int mdc_lock_flush(const struct lu_env *env, struct osc_object *obj,
                          pgoff_t start, pgoff_t end, enum cl_lock_mode mode,
                          bool discard)
{
        int result = 0;
        int rc;

        if (mode == CLM_WRITE) {
                result = osc_cache_writeback_range(env, obj, start, end, 1,
                                                   discard);
                CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
                       obj, start, end, result,
                       discard ? "discarded" : "written back");
                if (result > 0)
                        result = 0;
        }

        /* Avoid lock matching with CLM_WRITE, there can be no other locks */
        rc = mdc_lock_discard_pages(env, obj, start, end,
                                    mode == CLM_WRITE || discard);
        if (result == 0 && rc < 0)
                result = rc;

        return result;
}

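/* Cancel a lockless MDC lock: flush the covered pages and wake up waiters;
 * there is no DLM lock to drop in this case.
 */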
void mdc_lock_lockless_cancel(const struct lu_env *env,
                              const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        struct osc_object *osc = cl2osc(slice->cls_obj);
        struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
        int rc;

        LASSERT(ols->ols_dlmlock == NULL);
        rc = mdc_lock_flush(env, osc, descr->cld_start, descr->cld_end,
                            descr->cld_mode, false);
        if (rc != 0)
                CERROR("Pages for lockless lock %p were not purged(%d)\n",
                       ols, rc);

        osc_lock_wake_waiters(env, osc, ols);
}

/**
 * Helper for mdc_ldlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_canceling(const struct lu_env *env,
                             struct ldlm_lock *dlmlock)
{
        struct cl_object *obj = NULL;
        int result = 0;
        bool discard;
        enum cl_lock_mode mode = CLM_READ;

        lock_res_and_lock(dlmlock);
        if (!ldlm_is_granted(dlmlock)) {
                dlmlock->l_ast_data = NULL;
                unlock_res_and_lock(dlmlock);
                return 0;
        }

        discard = ldlm_is_discard_data(dlmlock);
        if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
                mode = CLM_WRITE;

        if (dlmlock->l_ast_data != NULL) {
                obj = osc2cl(dlmlock->l_ast_data);
                cl_object_get(obj);
        }
        unlock_res_and_lock(dlmlock);

        /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
         * the object has been destroyed. */
        if (obj != NULL) {
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;

                /* Destroy pages covered by the extent of the DLM lock */
                result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
                                        CL_PAGE_EOF, mode, discard);

                /* Losing a lock, set KMS to 0.
                 * NB: assumed that DOM lock covers whole data on MDT.
                 */
                lock_res_and_lock(dlmlock);
                dlmlock->l_ast_data = NULL;
                cl_object_attr_lock(obj);
                attr->cat_kms = 0;
                cl_object_attr_update(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
                unlock_res_and_lock(dlmlock);
                cl_object_put(env, obj);
        }
        return result;
}

int mdc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                          struct ldlm_lock_desc *new, void *data, int reason)
{
        int rc = 0;

        switch (reason) {
        case LDLM_CB_BLOCKING: {
                struct lustre_handle lockh;

                ldlm_lock2handle(dlmlock, &lockh);
                rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                if (rc == -ENODATA)
                        rc = 0;
                break;
        }
        case LDLM_CB_CANCELING: {
                struct lu_env *env;
                __u16 refcheck;

                /*
                 * This can be called in the context of outer IO, e.g.,
                 *
                 *    osc_enqueue_base()->...
                 *      ->ldlm_prep_elc_req()->...
                 *        ->ldlm_cancel_callback()->...
                 *          ->osc_ldlm_blocking_ast()
                 *
                 * new environment has to be created to not corrupt outer
                 * context.
                 */
                env = cl_env_get(&refcheck);
                if (IS_ERR(env)) {
                        rc = PTR_ERR(env);
                        break;
                }

                rc = mdc_dlm_canceling(env, dlmlock);
                cl_env_put(env, &refcheck);
                break;
        }
        default:
                LBUG();
        }
        return rc;
}

/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server.
 * This can be optimized to not update attributes when the lock is a result
 * of a local match.
 *
 * Called under lock and resource spin-locks.
 */
void mdc_lock_lvb_update(const struct lu_env *env, struct osc_object *osc,
                         struct ldlm_lock *dlmlock, struct ost_lvb *lvb)
{
        struct cl_object *obj = osc2cl(osc);
        struct lov_oinfo *oinfo = osc->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME |
                         CAT_SIZE;
        unsigned int setkms = 0;

        if (lvb == NULL) {
                LASSERT(dlmlock != NULL);
                lvb = &dlmlock->l_ost_lvb;
        }
        cl_lvb2attr(attr, lvb);

        cl_object_attr_lock(obj);
        if (dlmlock != NULL) {
                __u64 size;

                check_res_locked(dlmlock->l_resource);
                size = lvb->lvb_size;

                if (size >= oinfo->loi_kms) {
                        valid |= CAT_KMS;
                        attr->cat_kms = size;
                        setkms = 1;
                }
                ldlm_lock_allow_match_locked(dlmlock);
        }

        /* The size should not be less than the kms */
        if (attr->cat_size < oinfo->loi_kms)
                attr->cat_size = oinfo->loi_kms;

        LDLM_DEBUG(dlmlock, "acquired size %llu, setting rss=%llu;%s "
                   "kms=%llu, end=%llu", lvb->lvb_size, attr->cat_size,
                   setkms ? "" : " leaving",
                   setkms ? attr->cat_kms : oinfo->loi_kms,
                   dlmlock ? dlmlock->l_policy_data.l_extent.end : -1ull);

        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}

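/* Called when an enqueued lock becomes granted: attach the DLM lock to the
 * osc_lock, take the references it will keep, extend the cl_lock descriptor
 * to the whole file and update the object attributes from the lock LVB.
 */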
static void mdc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
                             struct lustre_handle *lockh)
{
        struct osc_object *osc = cl2osc(oscl->ols_cl.cls_obj);
        struct ldlm_lock *dlmlock;

        dlmlock = ldlm_handle2lock_long(lockh, 0);
        LASSERT(dlmlock != NULL);

        /* lock reference taken by ldlm_handle2lock_long() is
         * owned by osc_lock and released in osc_lock_detach()
         */
        lu_ref_add_atomic(&dlmlock->l_reference, "osc_lock", oscl);
        oscl->ols_has_ref = 1;

        LASSERT(oscl->ols_dlmlock == NULL);
        oscl->ols_dlmlock = dlmlock;

        /* This may be a matched lock for glimpse request, do not hold
         * lock reference in that case. */
        if (!oscl->ols_glimpse) {
                /* hold a refc for non glimpse lock which will
                 * be released in osc_lock_cancel() */
                lustre_handle_copy(&oscl->ols_handle, lockh);
                ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
                oscl->ols_hold = 1;
        }

        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
        if (ldlm_is_granted(dlmlock)) {
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

                /* extend the lock extent, otherwise it will have problem when
                 * we decide whether to grant a lockless lock. */
                descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, 0);
                descr->cld_end = CL_PAGE_EOF;

                /* no lvb update for matched lock */
                if (!ldlm_is_lvb_cached(dlmlock)) {
                        LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                        LASSERT(osc == dlmlock->l_ast_data);
                        mdc_lock_lvb_update(env, osc, dlmlock, NULL);
                        ldlm_set_lvb_cached(dlmlock);
                }
        }
        unlock_res_and_lock(dlmlock);

        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
}

/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after mdc_enqueue_send() matched a local DLM
 * lock.
 */
static int mdc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                           int errcode)
{
        struct osc_lock *oscl = cookie;
        struct cl_lock_slice *slice = &oscl->ols_cl;
        struct lu_env *env;
        int rc;

        env = cl_env_percpu_get();
        /* should never happen, similar to osc_ldlm_blocking_ast(). */
        LASSERT(!IS_ERR(env));

        rc = ldlm_error2errno(errcode);
        if (oscl->ols_state == OLS_ENQUEUED) {
                oscl->ols_state = OLS_UPCALL_RECEIVED;
        } else if (oscl->ols_state == OLS_CANCELLED) {
                rc = -EIO;
        } else {
                CERROR("Impossible state: %d\n", oscl->ols_state);
                LBUG();
        }

        CDEBUG(D_INODE, "rc %d, err %d\n", rc, errcode);
        if (rc == 0)
                mdc_lock_granted(env, oscl, lockh);

        /* Error handling, some errors are tolerable. */
        if (oscl->ols_glimpse && rc == -ENAVAIL) {
                LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                mdc_lock_lvb_update(env, cl2osc(slice->cls_obj),
                                    NULL, &oscl->ols_lvb);
                /* Hide the error. */
                rc = 0;
        }

        if (oscl->ols_owner != NULL)
                cl_sync_io_note(env, oscl->ols_owner, rc);
        cl_env_percpu_put(env);

        return rc;
}

/* This is needed only to support old servers (before 2.14) */
int mdc_fill_lvb(struct req_capsule *pill, struct ost_lvb *lvb)
{
        struct mdt_body *body;

        /* get LVB data from mdt_body otherwise */
        body = req_capsule_server_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                return -EPROTO;

        if (!(body->mbo_valid & OBD_MD_DOM_SIZE))
                return -EPROTO;

        mdc_body2lvb(body, lvb);
        return 0;
}

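/* Finish an MDC enqueue: extract the LVB from the reply (or from the DLM
 * reply for an aborted glimpse), store it in the lock, run the upcall and
 * drop the enqueue reference on the lock handle.
 */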
int mdc_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                     osc_enqueue_upcall_f upcall, void *cookie,
                     struct lustre_handle *lockh, enum ldlm_mode mode,
                     __u64 *flags, int errcode)
{
        struct osc_lock *ols = cookie;
        bool glimpse = *flags & LDLM_FL_HAS_INTENT;
        int rc = 0;

        /* needed only for glimpse from an old server (< 2.14) */
        if (glimpse && !exp_connect_dom_lvb(exp))
                rc = mdc_fill_lvb(&req->rq_pill, &ols->ols_lvb);

        if (glimpse && errcode == ELDLM_LOCK_ABORTED) {
                struct ldlm_reply *rep;

                rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
                if (likely(rep != NULL)) {
                        rep->lock_policy_res2 =
                                ptlrpc_status_ntoh(rep->lock_policy_res2);
                        if (rep->lock_policy_res2)
                                errcode = rep->lock_policy_res2;
                } else {
                        rc = -EPROTO;
                }
                *flags |= LDLM_FL_LVB_READY;
        } else if (errcode == ELDLM_OK) {
                struct ldlm_lock *lock;

                /* Callers have references, should be valid always */
                lock = ldlm_handle2lock(lockh);
                LASSERT(lock != NULL);

                /* At this point ols_lvb must be filled with correct LVB either
                 * by mdc_fill_lvb() above or by ldlm_cli_enqueue_fini().
                 * DoM uses l_ost_lvb to store LVB data, so copy it here from
                 * just updated ols_lvb.
                 */
                lock_res_and_lock(lock);
                memcpy(&lock->l_ost_lvb, &ols->ols_lvb,
                       sizeof(lock->l_ost_lvb));
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                *flags |= LDLM_FL_LVB_READY;
        }

        /* Call the update callback. */
        rc = (*upcall)(cookie, lockh, rc < 0 ? rc : errcode);

        /* release the reference taken in ldlm_cli_enqueue() */
        if (errcode == ELDLM_LOCK_MATCHED)
                errcode = ELDLM_OK;
        if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
                ldlm_lock_decref(lockh, mode);

        return rc;
}

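/* Interpret callback for an asynchronous enqueue request: complete the LDLM
 * part via ldlm_cli_enqueue_fini() and then the MDC part via
 * mdc_enqueue_fini(), keeping an extra lock reference across the upcall.
 */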
int mdc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                          void *args, int rc)
{
        struct osc_enqueue_args *aa = args;
        struct ldlm_lock *lock;
        struct lustre_handle *lockh = &aa->oa_lockh;
        enum ldlm_mode mode = aa->oa_mode;
        struct ldlm_enqueue_info einfo = {
                .ei_type = aa->oa_type,
                .ei_mode = mode,
        };

        LASSERT(!aa->oa_speculative);

        /* ldlm_cli_enqueue is holding a reference on the lock, so it must
         * be valid. */
        lock = ldlm_handle2lock(lockh);
        LASSERTF(lock != NULL,
                 "lockh %#llx, req %p, aa %p - client evicted?\n",
                 lockh->cookie, req, aa);

        /* Take an additional reference so that a blocking AST that
         * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
         * to arrive after an upcall has been executed by
         * mdc_enqueue_fini().
         */
        ldlm_lock_addref(lockh, mode);

        /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);

        /* Let CP AST to grant the lock first. */
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);

        /* Complete obtaining the lock procedure. */
        rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
                                   aa->oa_lvb, aa->oa_lvb ?
                                   sizeof(*aa->oa_lvb) : 0, lockh, rc, true);
        /* Complete mdc stuff. */
        rc = mdc_enqueue_fini(aa->oa_exp, req, aa->oa_upcall, aa->oa_cookie,
                              lockh, mode, aa->oa_flags, rc);

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);

        ldlm_lock_decref(lockh, mode);
        LDLM_LOCK_PUT(lock);
        return rc;
}

/* When enqueuing asynchronously, locks are not ordered, so we can obtain a
 * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
 * with other synchronous requests; however, keeping some locks while trying to
 * obtain others may take a considerable amount of time in case of OST failure,
 * and when a client does not release a lock that other sync requests wait for,
 * the client is excluded from the cluster -- such scenarios make life
 * difficult, so release locks just after they are obtained. */
int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
                     struct ldlm_res_id *res_id, __u64 *flags,
                     union ldlm_policy_data *policy, struct ost_lvb *lvb,
                     osc_enqueue_upcall_f upcall, void *cookie,
                     struct ldlm_enqueue_info *einfo, int async)
{
        struct obd_device *obd = exp->exp_obd;
        struct lustre_handle lockh = { 0 };
        struct ptlrpc_request *req = NULL;
        struct ldlm_intent *lit;
        enum ldlm_mode mode;
        bool glimpse = *flags & LDLM_FL_HAS_INTENT;
        __u64 match_flags = *flags;
        LIST_HEAD(cancels);
        int rc, count;
        int lvb_size;
        bool compat_glimpse = glimpse && !exp_connect_dom_lvb(exp);

        mode = einfo->ei_mode;
        if (einfo->ei_mode == LCK_PR)
                mode |= LCK_PW;

        match_flags |= LDLM_FL_LVB_READY;
        if (glimpse)
                match_flags |= LDLM_FL_BLOCK_GRANTED;
        mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
                               einfo->ei_type, policy, mode, &lockh);
        if (mode) {
                struct ldlm_lock *matched;

                if (*flags & LDLM_FL_TEST_LOCK)
                        return ELDLM_OK;

                matched = ldlm_handle2lock(&lockh);

                if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
                        ldlm_set_kms_ignore(matched);

                if (mdc_set_dom_lock_data(matched, einfo->ei_cbdata)) {
                        *flags |= LDLM_FL_LVB_READY;

                        /* We already have a lock, and it's referenced. */
                        (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);

                        ldlm_lock_decref(&lockh, mode);
                        LDLM_LOCK_PUT(matched);
                        return ELDLM_OK;
                }
                ldlm_lock_decref(&lockh, mode);
                LDLM_LOCK_PUT(matched);
        }

        if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
                return -ENOLCK;

        /* Glimpse is intent on old server */
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), compat_glimpse ?
                                   &RQF_LDLM_INTENT : &RQF_LDLM_ENQUEUE);
        if (req == NULL)
                return -ENOMEM;

        /* For WRITE lock cancel other locks on resource early if any */
        if (einfo->ei_mode & LCK_PW)
                count = mdc_resource_get_unused_res(exp, res_id, &cancels,
                                                    einfo->ei_mode,
                                                    MDS_INODELOCK_DOM);
        else
                count = 0;

        rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
        if (rc < 0) {
                ptlrpc_request_free(req);
                return rc;
        }

        if (compat_glimpse) {
                /* pack the glimpse intent */
                lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
                lit->opc = IT_GLIMPSE;
        }

        /* users of mdc_enqueue() can pass this flag for ldlm_lock_match() */
        *flags &= ~LDLM_FL_BLOCK_GRANTED;

        if (compat_glimpse) {
                req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
                req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
                lvb_size = 0;
        } else {
                lvb_size = sizeof(*lvb);
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                                     lvb_size);
        }
        ptlrpc_request_set_replen(req);

        rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
                              lvb_size, LVB_T_OST, &lockh, async);
        if (async) {
                if (rc == 0) {
                        struct osc_enqueue_args *aa;

                        aa = ptlrpc_req_async_args(aa, req);
                        aa->oa_exp = exp;
                        aa->oa_mode = einfo->ei_mode;
                        aa->oa_type = einfo->ei_type;
                        lustre_handle_copy(&aa->oa_lockh, &lockh);
                        aa->oa_upcall = upcall;
                        aa->oa_cookie = cookie;
                        aa->oa_speculative = false;
                        aa->oa_flags = flags;
                        aa->oa_lvb = compat_glimpse ? NULL : lvb;

                        req->rq_interpret_reply = mdc_enqueue_interpret;
                        ptlrpcd_add_req(req);
                } else {
                        ptlrpc_req_finished(req);
                }
                return rc;
        }

        rc = mdc_enqueue_fini(exp, req, upcall, cookie, &lockh, einfo->ei_mode,
                              flags, rc);
        ptlrpc_req_finished(req);
        return rc;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() method for the MDC
 * layer. This initiates ldlm enqueue:
 *
 * - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 * - calls mdc_enqueue_send() to do the actual enqueue.
 *
 * mdc_enqueue_send() is supplied with an upcall function that is executed
 * when the lock is received either after a local cached ldlm lock is matched,
 * or when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
static int mdc_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *unused, struct cl_sync_io *anchor)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(slice->cls_obj);
        struct osc_lock *oscl = cl2osc_lock(slice);
        struct cl_lock *lock = slice->cls_lock;
        struct ldlm_res_id *resname = &info->oti_resname;
        union ldlm_policy_data *policy = &info->oti_policy;
        osc_enqueue_upcall_f upcall = mdc_lock_upcall;
        void *cookie = (void *)oscl;
        bool async = false;
        int result;

        LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
                 "lock = %p, ols = %p\n", lock, oscl);

        if (oscl->ols_state == OLS_GRANTED)
                return 0;

        /* Lockahead is not supported on MDT yet */
        if (oscl->ols_flags & LDLM_FL_NO_EXPANSION) {
                result = -EOPNOTSUPP;
                return result;
        }

        if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
                GOTO(enqueue_base, 0);

        if (oscl->ols_glimpse) {
                LASSERT(equi(oscl->ols_speculative, anchor == NULL));
                async = true;
                GOTO(enqueue_base, 0);
        }

        result = osc_lock_enqueue_wait(env, osc, oscl);
        if (result < 0)
                goto out;

        /* we can grant lockless lock right after all conflicting locks
         * are canceled. */
        if (osc_lock_is_lockless(oscl)) {
                oscl->ols_state = OLS_GRANTED;
                oio->oi_lockless = 1;
                return 0;
        }

enqueue_base:
        oscl->ols_state = OLS_ENQUEUED;
        if (anchor != NULL) {
                atomic_inc(&anchor->csi_sync_nr);
                oscl->ols_owner = anchor;
        }

        /**
         * DLM lock's ast data must be osc_object;
         * DLM's enqueue callback set to mdc_lock_upcall() with cookie as
         * osc_lock.
         */
        fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
        mdc_lock_build_policy(env, lock, policy);
        LASSERT(!oscl->ols_speculative);
        result = mdc_enqueue_send(env, osc_export(osc), resname,
                                  &oscl->ols_flags, policy, &oscl->ols_lvb,
                                  upcall, cookie, &oscl->ols_einfo, async);
        if (result == 0) {
                if (osc_lock_is_lockless(oscl)) {
                        oio->oi_lockless = 1;
                } else if (!async) {
                        LASSERT(oscl->ols_state == OLS_GRANTED);
                        LASSERT(oscl->ols_hold);
                        LASSERT(oscl->ols_dlmlock != NULL);
                }
        }
out:
        if (result < 0) {
                oscl->ols_state = OLS_CANCELLED;
                osc_lock_wake_waiters(env, osc, oscl);

                if (anchor != NULL)
                        cl_sync_io_note(env, anchor, result);
        }
        return result;
}

static const struct cl_lock_operations mdc_lock_lockless_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = mdc_lock_enqueue,
        .clo_cancel = mdc_lock_lockless_cancel,
        .clo_print = osc_lock_print
};

static const struct cl_lock_operations mdc_lock_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = mdc_lock_enqueue,
        .clo_cancel = osc_lock_cancel,
        .clo_print = osc_lock_print,
};

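/* Allocate and initialize an osc_lock slice for a cl_lock on a Data-on-MDT
 * object and add it to the lock; speculative (AGL) requests are ignored for
 * DoM since stat already returns the size.
 */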
int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io)
{
        struct osc_lock *ols;
        __u32 enqflags = lock->cll_descr.cld_enq_flags;
        __u64 flags = osc_enq2ldlm_flags(enqflags);

        /* Ignore AGL for Data-on-MDT, stat returns size data */
        if ((enqflags & CEF_SPECULATIVE) != 0)
                return 0;

        OBD_SLAB_ALLOC_PTR_GFP(ols, osc_lock_kmem, GFP_NOFS);
        if (unlikely(ols == NULL))
                return -ENOMEM;

        ols->ols_state = OLS_NEW;
        spin_lock_init(&ols->ols_lock);
        INIT_LIST_HEAD(&ols->ols_waiting_list);
        INIT_LIST_HEAD(&ols->ols_wait_entry);
        INIT_LIST_HEAD(&ols->ols_nextlock_oscobj);
        ols->ols_lockless_ops = &mdc_lock_lockless_ops;

        ols->ols_flags = flags;
        ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
        if (lock->cll_descr.cld_mode == CLM_GROUP)
                ols->ols_flags |= LDLM_FL_ATOMIC_CB;

        if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
                ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
                ols->ols_glimpse = 1;
        }
        mdc_lock_build_einfo(env, lock, cl2osc(obj), &ols->ols_einfo);

        cl_lock_slice_add(lock, &ols->ols_cl, obj, &mdc_lock_ops);

        if (!(enqflags & CEF_MUST))
                osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));

        if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
                osc_lock_set_writer(env, io, obj, ols);

        LDLM_DEBUG_NOLOCK("lock %p, mdc lock %p, flags %llx\n",
                          lock, ols, ols->ols_flags);

        return 0;
}

/**
 * An implementation of cl_io_operations specific methods for MDC layer.
 */
static int mdc_async_upcall(void *a, int rc)
{
        struct osc_async_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}

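/* Look up the remote handle of the DOM lock protecting the given page index;
 * dump the resource and fail if no such lock is found.
 */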
static int mdc_get_lock_handle(const struct lu_env *env, struct osc_object *osc,
                               pgoff_t index, struct lustre_handle *lh)
{
        struct ldlm_lock *lock;

        /* find DOM lock protecting object */
        lock = mdc_dlmlock_at_pgoff(env, osc, index,
                                    OSC_DAP_FL_TEST_LOCK |
                                    OSC_DAP_FL_CANCELING);
        if (lock == NULL) {
                struct ldlm_resource *res;
                struct ldlm_res_id *resname;

                resname = &osc_env_info(env)->oti_resname;
                fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
                res = ldlm_resource_get(osc_export(osc)->exp_obd->obd_namespace,
                                        NULL, resname, LDLM_IBITS, 0);
                if (IS_ERR(res))
                        CERROR("No lock resource for "DFID"\n",
                               PFID(lu_object_fid(osc2lu(osc))));
                else
                        ldlm_resource_dump(D_ERROR, res);
                libcfs_debug_dumpstack(NULL);
                return -ENOENT;
        }

        *lh = lock->l_remote_handle;
        LDLM_LOCK_PUT(lock);
        return 0;
}

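/* Start a setattr (truncate/fallocate/time update) on a Data-on-MDT object:
 * flush or punch cached pages, update the cl_object attributes and send the
 * punch/fallocate RPC to the MDT when a size change is involved.
 */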
static int mdc_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
        unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
        enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
        int rc;

        /* silently ignore non-truncate setattr for Data-on-MDT object */
        if (cl_io_is_trunc(io)) {
                /* truncate cache dirty pages first */
                rc = osc_cache_truncate_start(env, cl2osc(obj), size,
                                              &oio->oi_trunc);
                if (rc < 0)
                        return rc;
        } else if (cl_io_is_fallocate(io) &&
                   io->u.ci_setattr.sa_falloc_mode & FALLOC_FL_PUNCH_HOLE) {
                rc = osc_punch_start(env, io, obj);
                if (rc < 0)
                        return rc;
        }

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                rc = cl_object_attr_get(env, obj, attr);
                if (rc == 0) {
                        struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
                        unsigned int cl_valid = 0;

                        if (ia_avalid & ATTR_SIZE) {
                                attr->cat_size = size;
                                attr->cat_kms = size;
                                cl_valid = (CAT_SIZE | CAT_KMS);
                        }
                        if (ia_avalid & ATTR_MTIME_SET) {
                                attr->cat_mtime = lvb->lvb_mtime;
                                cl_valid |= CAT_MTIME;
                        }
                        if (ia_avalid & ATTR_ATIME_SET) {
                                attr->cat_atime = lvb->lvb_atime;
                                cl_valid |= CAT_ATIME;
                        }
                        if (ia_xvalid & OP_XVALID_CTIME_SET) {
                                attr->cat_ctime = lvb->lvb_ctime;
                                cl_valid |= CAT_CTIME;
                        }
                        rc = cl_object_attr_update(env, obj, attr, cl_valid);
                }
                cl_object_attr_unlock(obj);
                if (rc < 0)
                        return rc;
        }

        if (!(ia_avalid & ATTR_SIZE) && !cl_io_is_fallocate(io))
                return 0;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_mtime = attr->cat_mtime;
        oa->o_atime = attr->cat_atime;
        oa->o_ctime = attr->cat_ctime;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
                      OBD_MD_FLCTIME | OBD_MD_FLMTIME | OBD_MD_FLSIZE |
                      OBD_MD_FLBLOCKS;
        if (oio->oi_lockless) {
                oa->o_flags = OBD_FL_SRVLOCK;
                oa->o_valid |= OBD_MD_FLFLAGS;
        } else {
                rc = mdc_get_lock_handle(env, cl2osc(obj), CL_PAGE_EOF,
                                         &oa->o_handle);
                if (rc == 0)
                        oa->o_valid |= OBD_MD_FLHANDLE;
        }

        init_completion(&cbargs->opc_sync);
        if (cl_io_is_fallocate(io)) {
                int falloc_mode = io->u.ci_setattr.sa_falloc_mode;

                oa->o_size = io->u.ci_setattr.sa_falloc_offset;
                oa->o_blocks = io->u.ci_setattr.sa_falloc_end;
                rc = osc_fallocate_base(osc_export(cl2osc(obj)), oa,
                                        mdc_async_upcall, cbargs, falloc_mode);
        } else {
                oa->o_size = size;
                oa->o_blocks = OBD_OBJECT_EOF;
                rc = osc_punch_send(osc_export(cl2osc(obj)), oa,
                                    mdc_async_upcall, cbargs);
        }
        cbargs->opc_rpc_sent = rc == 0;
        return rc;
}

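/* Read-ahead setup for DoM: find the DOM lock covering @start, take a PR
 * reference on it if it is held in another mode, and allow read-ahead up to
 * the end of file under that lock.
 */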
static int mdc_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct osc_io *oio = cl2osc_io(env, ios);
        struct ldlm_lock *dlmlock;

        dlmlock = mdc_dlmlock_at_pgoff(env, osc, start, 0);
        if (dlmlock == NULL)
                return -ENODATA;

        oio->oi_is_readahead = 1;
        if (dlmlock->l_req_mode != LCK_PR) {
                struct lustre_handle lockh;

                ldlm_lock2handle(dlmlock, &lockh);
                ldlm_lock_addref(&lockh, LCK_PR);
                ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
        }

        ra->cra_rpc_pages = osc_cli(osc)->cl_max_pages_per_rpc;
        ra->cra_end_idx = CL_PAGE_EOF;
        ra->cra_release = osc_read_ahead_release;
        ra->cra_dlmlock = dlmlock;

        return 0;
}

int mdc_io_fsync_start(const struct lu_env *env,
                       const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct cl_fsync_io *fio = &io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        struct osc_object *osc = cl2osc(obj);
        int result = 0;

        /* An MDC lock always covers the whole object, so sync the whole
         * possible range regardless of the supplied start/end values.
         */
        result = osc_cache_writeback_range(env, osc, 0, CL_PAGE_EOF, 0,
                                           fio->fi_mode == CL_FSYNC_DISCARD);
        if (result > 0) {
                fio->fi_nr_written += result;
                result = 0;
        }
        if (fio->fi_mode == CL_FSYNC_ALL) {
                int rc;

                rc = osc_cache_wait_range(env, osc, 0, CL_PAGE_EOF);
                if (result == 0)
                        result = rc;
                /* Use OSC sync code because it is asynchronous.
                 * It is to be added into MDC to avoid using OST_SYNC
                 * at both MDC and MDT.
                 */
                rc = osc_fsync_ost(env, osc, fio);
                if (result == 0)
                        result = rc;
        }

        return result;
}

struct mdc_data_version_args {
        struct osc_io *dva_oio;
};

static int
mdc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                           void *args, int rc)
{
        struct mdc_data_version_args *dva = args;
        struct osc_io *oio = dva->dva_oio;
        const struct mdt_body *body;

        if (rc < 0)
                goto out;

        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        /* Prepare OBDO from mdt_body for CLIO */
        oio->oi_oa.o_valid = body->mbo_valid;
        oio->oi_oa.o_flags = body->mbo_flags;
        oio->oi_oa.o_data_version = body->mbo_version;
        oio->oi_oa.o_layout_version = body->mbo_layout_gen;
out:
        oio->oi_cbarg.opc_rc = rc;
        complete(&oio->oi_cbarg.opc_sync);
        return 0;
}

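/* Send an MDS_GETATTR request asking for the data version (and optionally
 * flushing data on the MDT) and let mdc_data_version_interpret() fill the
 * result into the osc_io OBDO.
 */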
static int mdc_io_data_version_start(const struct lu_env *env,
                                     const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct osc_object *obj = cl2osc(slice->cis_obj);
        struct obd_export *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct mdt_body *body;
        struct mdc_data_version_args *dva;
        int rc;

        memset(&oio->oi_oa, 0, sizeof(oio->oi_oa));
        oio->oi_oa.o_oi.oi_fid = *lu_object_fid(osc2lu(obj));
        oio->oi_oa.o_valid = OBD_MD_FLID;

        init_completion(&cbargs->opc_sync);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
        if (req == NULL)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
        if (rc < 0) {
                ptlrpc_request_free(req);
                return rc;
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
        body->mbo_fid1 = *lu_object_fid(osc2lu(obj));
        body->mbo_valid = OBD_MD_FLID;
        /* Indicate that data version is needed */
        body->mbo_valid |= OBD_MD_FLDATAVERSION;
        body->mbo_flags = 0;

        if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
                body->mbo_valid |= OBD_MD_FLFLAGS;
                body->mbo_flags |= OBD_FL_SRVLOCK;
                if (dv->dv_flags & LL_DV_WR_FLUSH)
                        body->mbo_flags |= OBD_FL_FLUSH;
        }

        req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = mdc_data_version_interpret;
        dva = ptlrpc_req_async_args(dva, req);
        dva->dva_oio = oio;

        ptlrpcd_add_req(req);

        return 0;
}

static void mdc_io_data_version_end(const struct lu_env *env,
                                    const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

        wait_for_completion(&cbargs->opc_sync);

        if (cbargs->opc_rc != 0) {
                slice->cis_io->ci_result = cbargs->opc_rc;
        } else {
                slice->cis_io->ci_result = 0;
                if (!(oio->oi_oa.o_valid &
                      (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
                        slice->cis_io->ci_result = -ENOTSUPP;

                if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION)
                        dv->dv_layout_version = oio->oi_oa.o_layout_version;
                if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)
                        dv->dv_data_version = oio->oi_oa.o_data_version;
        }
}

static const struct cl_io_operations mdc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_rw_iter_fini,
                        .cio_start     = osc_io_read_start,
                },
                [CIT_WRITE] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_rw_iter_fini,
                        .cio_start     = osc_io_write_start,
                        .cio_end       = osc_io_end,
                },
                [CIT_SETATTR] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = mdc_io_setattr_start,
                        .cio_end       = osc_io_setattr_end,
                },
                [CIT_DATA_VERSION] = {
                        .cio_start = mdc_io_data_version_start,
                        .cio_end   = mdc_io_data_version_end,
                },
                [CIT_FAULT] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_fault_start,
                        .cio_end       = osc_io_end,
                },
                [CIT_FSYNC] = {
                        .cio_start = mdc_io_fsync_start,
                        .cio_end   = osc_io_fsync_end,
                },
                [CIT_LSEEK] = {
                        .cio_start = osc_io_lseek_start,
                        .cio_end   = osc_io_lseek_end,
                },
        },
        .cio_read_ahead     = mdc_io_read_ahead,
        .cio_lru_reserve    = osc_io_lru_reserve,
        .cio_submit         = osc_io_submit,
        .cio_commit_async   = osc_io_commit_async,
        .cio_extent_release = osc_io_extent_release,
};

int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &mdc_io_ops);
        return 0;
}

static void mdc_build_res_name(struct osc_object *osc,
                               struct ldlm_res_id *resname)
{
        fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for MDC
 * layer. MDC is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void mdc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                             struct cl_req_attr *attr)
{
        u64 flags = attr->cra_flags;

        /* Copy object FID to cl_attr */
        attr->cra_oa->o_oi.oi_fid = *lu_object_fid(&obj->co_lu);

        if (flags & OBD_MD_FLGROUP)
                attr->cra_oa->o_valid |= OBD_MD_FLGROUP;

        if (flags & OBD_MD_FLID)
                attr->cra_oa->o_valid |= OBD_MD_FLID;

        if (flags & OBD_MD_FLHANDLE) {
                struct osc_page *opg;

                opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
                if (!opg->ops_srvlock) {
                        int rc;

                        rc = mdc_get_lock_handle(env, cl2osc(obj),
                                                 osc_index(opg),
                                                 &attr->cra_oa->o_handle);
                        if (rc) {
                                CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
                                              "uncovered page!\n");
                                LBUG();
                        } else {
                                attr->cra_oa->o_valid |= OBD_MD_FLHANDLE;
                        }
                }
        }
}

static int mdc_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;

        if (OST_LVB_IS_ERR(oinfo->loi_lvb.lvb_blocks))
                return OST_LVB_GET_ERR(oinfo->loi_lvb.lvb_blocks);

        return osc_attr_get(env, obj, attr);
}

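/* ldlm_resource_iterate() callback: detach the osc_object from a DLM lock
 * before the object is destroyed, refreshing the lock LVB from the cached
 * lov_oinfo so later matches still see valid attributes.
 */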
static int mdc_object_ast_clear(struct ldlm_lock *lock, void *data)
{
        struct osc_object *osc = (struct osc_object *)data;
        struct ost_lvb *lvb = &lock->l_ost_lvb;
        struct lov_oinfo *oinfo;

        if (lock->l_ast_data == data) {
                lock->l_ast_data = NULL;

                LASSERT(osc != NULL);
                LASSERT(osc->oo_oinfo != NULL);
                LASSERT(lvb != NULL);

                /* Updates lvb in lock by the cached oinfo */
                oinfo = osc->oo_oinfo;

                LDLM_DEBUG(lock, "update lock size %llu blocks %llu [cma]time: "
                           "%llu %llu %llu by oinfo size %llu blocks %llu "
                           "[cma]time %llu %llu %llu", lvb->lvb_size,
                           lvb->lvb_blocks, lvb->lvb_ctime, lvb->lvb_mtime,
                           lvb->lvb_atime, oinfo->loi_lvb.lvb_size,
                           oinfo->loi_lvb.lvb_blocks, oinfo->loi_lvb.lvb_ctime,
                           oinfo->loi_lvb.lvb_mtime, oinfo->loi_lvb.lvb_atime);
                LASSERT(oinfo->loi_lvb.lvb_size >= oinfo->loi_kms);

                cl_object_attr_lock(&osc->oo_cl);
                memcpy(lvb, &oinfo->loi_lvb, sizeof(oinfo->loi_lvb));
                cl_object_attr_unlock(&osc->oo_cl);
                ldlm_clear_lvb_cached(lock);
        }
        RETURN(LDLM_ITER_CONTINUE);
}

int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
        struct osc_object *osc = cl2osc(obj);
        struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;

        /* DLM locks don't hold a reference of osc_object so we have to
         * clear it before the object is being destroyed. */
        osc_build_res_name(osc, resname);
        ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
                              mdc_object_ast_clear, osc);
        return 0;
}

static int mdc_object_flush(const struct lu_env *env, struct cl_object *obj,
                            struct ldlm_lock *lock)
{
        /* if lock cancel is initiated from llite then it is combined
         * lock with DOM bit and it may have no l_ast_data initialized yet,
         * so init it here with given osc_object.
         */
        mdc_set_dom_lock_data(lock, cl2osc(obj));
        RETURN(mdc_dlm_canceling(env, lock));
}

static const struct cl_object_operations mdc_ops = {
        .coo_page_init = osc_page_init,
        .coo_lock_init = mdc_lock_init,
        .coo_io_init = mdc_io_init,
        .coo_attr_get = mdc_attr_get,
        .coo_attr_update = osc_attr_update,
        .coo_glimpse = osc_object_glimpse,
        .coo_req_attr_set = mdc_req_attr_set,
        .coo_prune = mdc_object_prune,
        .coo_object_flush = mdc_object_flush
};

static const struct osc_object_operations mdc_object_ops = {
        .oto_build_res_name = mdc_build_res_name,
        .oto_dlmlock_at_pgoff = mdc_dlmlock_at_pgoff,
};

static int mdc_object_init(const struct lu_env *env, struct lu_object *obj,
                           const struct lu_object_conf *conf)
{
        struct osc_object *osc = lu2osc(obj);

        if (osc->oo_initialized)
                return 0;

        osc->oo_initialized = true;

        return osc_object_init(env, obj, conf);
}

static void mdc_object_free(const struct lu_env *env, struct lu_object *obj)
{
        osc_object_free(env, obj);
}

static const struct lu_object_operations mdc_lu_obj_ops = {
        .loo_object_init = mdc_object_init,
        .loo_object_delete = NULL,
        .loo_object_release = NULL,
        .loo_object_free = mdc_object_free,
        .loo_object_print = osc_object_print,
        .loo_object_invariant = NULL
};

struct lu_object *mdc_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *unused,
                                   struct lu_device *dev)
{
        struct osc_object *osc;
        struct lu_object *obj;

        OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, GFP_NOFS);
        if (osc != NULL) {
                obj = osc2lu(osc);
                lu_object_init(obj, NULL, dev);
                osc->oo_cl.co_ops = &mdc_ops;
                obj->lo_ops = &mdc_lu_obj_ops;
                osc->oo_obj_ops = &mdc_object_ops;
                osc->oo_initialized = false;
        } else {
                obj = NULL;
        }
        return obj;
}

static int mdc_process_config(const struct lu_env *env, struct lu_device *d,
                              struct lustre_cfg *cfg)
{
        size_t count = class_modify_config(cfg, PARAM_MDC,
                                           &d->ld_obd->obd_kset.kobj);
        return count > 0 ? 0 : count;
}

const struct lu_device_operations mdc_lu_ops = {
        .ldo_object_alloc = mdc_object_alloc,
        .ldo_process_config = mdc_process_config,
        .ldo_recovery_complete = NULL,
};

static struct lu_device *mdc_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *t,
                                          struct lustre_cfg *cfg)
{
        struct lu_device *d;
        struct osc_device *od;
        struct obd_device *obd;
        int rc;

        OBD_ALLOC_PTR(od);
        if (od == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        cl_device_init(&od->od_cl, t);
        d = osc2lu_dev(od);
        d->ld_ops = &mdc_lu_ops;

        /* Setup MDC OBD */
        obd = class_name2obd(lustre_cfg_string(cfg, 0));
        if (obd == NULL)
                RETURN(ERR_PTR(-ENODEV));

        rc = mdc_setup(obd, cfg);
        if (rc < 0) {
                osc_device_free(env, d);
                RETURN(ERR_PTR(rc));
        }
        od->od_exp = obd->obd_self_export;
        RETURN(d);
}

static const struct lu_device_type_operations mdc_device_type_ops = {
        .ldto_device_alloc = mdc_device_alloc,
        .ldto_device_free = osc_device_free,
        .ldto_device_init = osc_device_init,
        .ldto_device_fini = osc_device_fini
};

struct lu_device_type mdc_device_type = {
        .ldt_tags = LU_DEVICE_CL,
        .ldt_name = LUSTRE_MDC_NAME,
        .ldt_ops = &mdc_device_type_ops,
        .ldt_ctx_tags = LCT_CL_THREAD
};