4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2017, Intel Corporation.
26 * This file is part of Lustre, http://www.lustre.org/
28 * Implementation of cl_device and cl_req for the MDC layer.
30 * Author: Mikhail Pershin <mike.pershin@intel.com>
33 #define DEBUG_SUBSYSTEM S_MDC
35 #include <obd_class.h>
36 #include <lustre_osc.h>
37 #include <uapi/linux/lustre/lustre_param.h>
39 #include "mdc_internal.h"
41 static void mdc_lock_build_policy(const struct lu_env *env,
42 union ldlm_policy_data *policy)
44 memset(policy, 0, sizeof(*policy));
45 policy->l_inodebits.bits = MDS_INODELOCK_DOM;
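/*
 * Unlike OSC extent locks, Data-on-MDT locks are IBITS locks on the file's
 * MDT resource: the enqueue policy built above requests only the
 * MDS_INODELOCK_DOM bit, and a single such lock covers the whole data
 * portion stored on the MDT.
 */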
48 int mdc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
50 return osc_ldlm_glimpse_ast(dlmlock, data);
53 static void mdc_lock_build_einfo(const struct lu_env *env,
54 const struct cl_lock *lock,
55 struct osc_object *osc,
56 struct ldlm_enqueue_info *einfo)
58 einfo->ei_type = LDLM_IBITS;
59 einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
60 einfo->ei_cb_bl = mdc_ldlm_blocking_ast;
61 einfo->ei_cb_cp = ldlm_completion_ast;
62 einfo->ei_cb_gl = mdc_ldlm_glimpse_ast;
63 einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
66 static void mdc_lock_lvb_update(const struct lu_env *env,
67 struct osc_object *osc,
68 struct ldlm_lock *dlmlock,
71 static int mdc_set_dom_lock_data(const struct lu_env *env,
72 struct ldlm_lock *lock, void *data)
74 struct osc_object *obj = data;
77 LASSERT(lock != NULL);
78 LASSERT(lock->l_glimpse_ast == mdc_ldlm_glimpse_ast);
80 lock_res_and_lock(lock);
81 if (lock->l_ast_data == NULL) {
82 lock->l_ast_data = data;
83 mdc_lock_lvb_update(env, obj, lock, NULL);
86 if (lock->l_ast_data == data)
89 unlock_res_and_lock(lock);
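/*
 * Match an existing DOM IBITS lock for the given resource and, on success,
 * attach the osc_object to it via mdc_set_dom_lock_data(). If the lock's
 * l_ast_data already points at a different object, the reference taken by
 * ldlm_lock_match() is dropped again.
 */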
94 int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
95 struct ldlm_res_id *res_id,
96 enum ldlm_type type, union ldlm_policy_data *policy,
97 enum ldlm_mode mode, __u64 *flags, void *data,
98 struct lustre_handle *lockh, int unref)
100 struct obd_device *obd = exp->exp_obd;
101 __u64 lflags = *flags;
106 rc = ldlm_lock_match(obd->obd_namespace, lflags,
107 res_id, type, policy, mode, lockh, unref);
108 if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
112 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
114 LASSERT(lock != NULL);
115 if (!mdc_set_dom_lock_data(env, lock, data)) {
116 ldlm_lock_decref(lockh, rc);
125 * Finds an existing lock covering the page with the given index.
126 * A copy of osc_obj_dlmlock_at_pgoff(), but for a DoM IBITS lock.
128 struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
129 struct osc_object *obj, pgoff_t index,
130 enum osc_dap_flags dap_flags)
132 struct osc_thread_info *info = osc_env_info(env);
133 struct ldlm_res_id *resname = &info->oti_resname;
134 union ldlm_policy_data *policy = &info->oti_policy;
135 struct lustre_handle lockh;
136 struct ldlm_lock *lock = NULL;
142 fid_build_reg_res_name(lu_object_fid(osc2lu(obj)), resname);
143 mdc_lock_build_policy(env, policy);
145 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
146 if (dap_flags & OSC_DAP_FL_TEST_LOCK)
147 flags |= LDLM_FL_TEST_LOCK;
150 /* Next, search for already existing DOM locks that will cover us */
151 /* If we're trying to read, we also search for an existing PW lock. The
152 * VFS and page cache already protect us locally, so lots of readers/
153 * writers can share a single PW lock. */
154 mode = mdc_dom_lock_match(env, osc_export(obj), resname, LDLM_IBITS,
155 policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
157 dap_flags & OSC_DAP_FL_CANCELING);
159 lock = ldlm_handle2lock(&lockh);
160 /* RACE: the lock is cancelled so let's try again */
161 if (unlikely(lock == NULL))
169 * Check if page @page is covered by another lock; if not, discard it.
171 static int mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
172 struct osc_page *ops, void *cbdata)
174 struct osc_thread_info *info = osc_env_info(env);
175 struct osc_object *osc = cbdata;
178 index = osc_index(ops);
179 if (index >= info->oti_fn_index) {
180 struct ldlm_lock *tmp;
181 struct cl_page *page = ops->ops_cl.cpl_page;
183 /* refresh non-overlapped index */
184 tmp = mdc_dlmlock_at_pgoff(env, osc, index,
185 OSC_DAP_FL_TEST_LOCK);
187 info->oti_fn_index = CL_PAGE_EOF;
189 } else if (cl_page_own(env, io, page) == 0) {
190 /* discard the page */
191 cl_page_discard(env, io, page);
192 cl_page_disown(env, io, page);
194 LASSERT(page->cp_state == CPS_FREEING);
198 info->oti_next_index = index + 1;
199 return CLP_GANG_OKAY;
203 * Discard pages protected by the given lock. This function traverses the
204 * radix tree to find all covering pages and discards them. If a page is
205 * covered by another lock, it is left in the cache.
207 * If an error happens at any step, the process continues anyway (the reasoning
208 * behind this is that lock cancellation cannot be delayed indefinitely).
210 static int mdc_lock_discard_pages(const struct lu_env *env,
211 struct osc_object *osc,
212 pgoff_t start, pgoff_t end,
215 struct osc_thread_info *info = osc_env_info(env);
216 struct cl_io *io = &info->oti_io;
217 osc_page_gang_cbt cb;
223 io->ci_obj = cl_object_top(osc2cl(osc));
224 io->ci_ignore_layout = 1;
225 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
229 cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
230 info->oti_fn_index = info->oti_next_index = start;
232 res = osc_page_gang_lookup(env, io, osc, info->oti_next_index,
233 end, cb, (void *)osc);
234 if (info->oti_next_index > end)
237 if (res == CLP_GANG_RESCHED)
239 } while (res != CLP_GANG_OKAY);
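/*
 * Flush the page cache covered by a DOM lock: for CLM_WRITE the dirty pages
 * are first written back (or discarded) via osc_cache_writeback_range(), and
 * then mdc_lock_discard_pages() drops the pages that are no longer protected
 * by any other lock.
 */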
245 static int mdc_lock_flush(const struct lu_env *env, struct osc_object *obj,
246 pgoff_t start, pgoff_t end, enum cl_lock_mode mode,
254 if (mode == CLM_WRITE) {
255 result = osc_cache_writeback_range(env, obj, start, end, 1,
257 CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
258 obj, start, end, result,
259 discard ? "discarded" : "written back");
264 /* Avoid lock matching with CLM_WRITE: there can be no other locks */
265 rc = mdc_lock_discard_pages(env, obj, start, end,
266 mode == CLM_WRITE || discard);
267 if (result == 0 && rc < 0)
273 void mdc_lock_lockless_cancel(const struct lu_env *env,
274 const struct cl_lock_slice *slice)
276 struct osc_lock *ols = cl2osc_lock(slice);
277 struct osc_object *osc = cl2osc(slice->cls_obj);
278 struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
281 LASSERT(ols->ols_dlmlock == NULL);
282 rc = mdc_lock_flush(env, osc, descr->cld_start, descr->cld_end,
285 CERROR("Pages for lockless lock %p were not purged(%d)\n",
288 osc_lock_wake_waiters(env, osc, ols);
292 * Helper for mdc_ldlm_blocking_ast() handling discrepancies between cl_lock
293 * and ldlm_lock caches.
295 static int mdc_dlm_blocking_ast0(const struct lu_env *env,
296 struct ldlm_lock *dlmlock,
297 void *data, int flag)
299 struct cl_object *obj = NULL;
302 enum cl_lock_mode mode = CLM_READ;
306 LASSERT(flag == LDLM_CB_CANCELING);
307 LASSERT(dlmlock != NULL);
309 lock_res_and_lock(dlmlock);
310 if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
311 dlmlock->l_ast_data = NULL;
312 unlock_res_and_lock(dlmlock);
316 discard = ldlm_is_discard_data(dlmlock);
317 if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
320 if (dlmlock->l_ast_data != NULL) {
321 obj = osc2cl(dlmlock->l_ast_data);
322 dlmlock->l_ast_data = NULL;
325 ldlm_set_kms_ignore(dlmlock);
326 unlock_res_and_lock(dlmlock);
328 /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
329 * the object has been destroyed. */
331 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
333 /* Destroy pages covered by the extent of the DLM lock */
334 result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
335 CL_PAGE_EOF, mode, discard);
336 /* Losing a lock, set KMS to 0.
337 * NB: it is assumed that the DOM lock covers all the data on the MDT. */
340 lock_res_and_lock(dlmlock);
341 cl_object_attr_lock(obj);
343 cl_object_attr_update(env, obj, attr, CAT_KMS);
344 cl_object_attr_unlock(obj);
345 unlock_res_and_lock(dlmlock);
346 cl_object_put(env, obj);
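/*
 * Blocking AST for DOM DLM locks. In the LDLM_CB_BLOCKING phase the lock is
 * simply cancelled asynchronously; in the LDLM_CB_CANCELING phase a fresh
 * lu_env is created (the AST may run inside an outer IO environment) and
 * mdc_dlm_blocking_ast0() flushes or discards the covered pages and resets
 * the KMS.
 */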
351 int mdc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
352 struct ldlm_lock_desc *new, void *data, int flag)
359 case LDLM_CB_BLOCKING: {
360 struct lustre_handle lockh;
362 ldlm_lock2handle(dlmlock, &lockh);
363 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
368 case LDLM_CB_CANCELING: {
373 * This can be called in the context of outer IO, e.g.,
375 * osc_enqueue_base()->...
376 * ->ldlm_prep_elc_req()->...
377 * ->ldlm_cancel_callback()->...
378 * ->osc_ldlm_blocking_ast()
380 * new environment has to be created to not corrupt outer
383 env = cl_env_get(&refcheck);
389 rc = mdc_dlm_blocking_ast0(env, dlmlock, data, flag);
390 cl_env_put(env, &refcheck);
400 * Updates object attributes from a lock value block (lvb) received together
401 * with the DLM lock reply from the server.
402 * This can be optimized to not update attributes when the lock results from a local match.
405 * Called under the lock and resource spin-locks.
407 void mdc_lock_lvb_update(const struct lu_env *env, struct osc_object *osc,
408 struct ldlm_lock *dlmlock, struct ost_lvb *lvb)
410 struct cl_object *obj = osc2cl(osc);
411 struct lov_oinfo *oinfo = osc->oo_oinfo;
412 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
413 unsigned valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME |
419 LASSERT(dlmlock != NULL);
420 lvb = &dlmlock->l_ost_lvb;
422 cl_lvb2attr(attr, lvb);
424 cl_object_attr_lock(obj);
425 if (dlmlock != NULL) {
428 check_res_locked(dlmlock->l_resource);
429 size = lvb->lvb_size;
431 if (size >= oinfo->loi_kms) {
432 LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu,"
433 " kms=%llu", lvb->lvb_size, size);
435 attr->cat_kms = size;
437 LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu,"
439 lvb->lvb_size, oinfo->loi_kms);
442 cl_object_attr_update(env, obj, attr, valid);
443 cl_object_attr_unlock(obj);
447 static void mdc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
448 struct lustre_handle *lockh, bool lvb_update)
450 struct ldlm_lock *dlmlock;
454 dlmlock = ldlm_handle2lock_long(lockh, 0);
455 LASSERT(dlmlock != NULL);
457 /* lock reference taken by ldlm_handle2lock_long() is
458 * owned by osc_lock and released in osc_lock_detach()
460 lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
461 oscl->ols_has_ref = 1;
463 LASSERT(oscl->ols_dlmlock == NULL);
464 oscl->ols_dlmlock = dlmlock;
466 /* This may be a matched lock for a glimpse request; do not hold
467 * a lock reference in that case. */
468 if (!oscl->ols_glimpse) {
469 /* hold a reference for a non-glimpse lock; it will
470 * be released in osc_lock_cancel() */
471 lustre_handle_copy(&oscl->ols_handle, lockh);
472 ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
476 /* Lock must have been granted. */
477 lock_res_and_lock(dlmlock);
478 if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
479 struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
481 /* extend the lock extent, otherwise there will be a problem when
482 * we decide whether to grant a lockless lock. */
483 descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
484 descr->cld_start = cl_index(descr->cld_obj, 0);
485 descr->cld_end = CL_PAGE_EOF;
487 /* no lvb update for matched lock */
489 LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
490 mdc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
494 unlock_res_and_lock(dlmlock);
496 LASSERT(oscl->ols_state != OLS_GRANTED);
497 oscl->ols_state = OLS_GRANTED;
502 * Lock upcall function that is executed either when a reply to an ENQUEUE RPC
503 * is received from the server, or after mdc_enqueue_send() matched a local DLM lock.
506 static int mdc_lock_upcall(void *cookie, struct lustre_handle *lockh,
509 struct osc_lock *oscl = cookie;
510 struct cl_lock_slice *slice = &oscl->ols_cl;
516 env = cl_env_percpu_get();
517 /* should never happen, similar to osc_ldlm_blocking_ast(). */
518 LASSERT(!IS_ERR(env));
520 rc = ldlm_error2errno(errcode);
521 if (oscl->ols_state == OLS_ENQUEUED) {
522 oscl->ols_state = OLS_UPCALL_RECEIVED;
523 } else if (oscl->ols_state == OLS_CANCELLED) {
526 CERROR("Impossible state: %d\n", oscl->ols_state);
530 CDEBUG(D_INODE, "rc %d, err %d\n", rc, errcode);
532 mdc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
534 /* Error handling, some errors are tolerable. */
535 if (oscl->ols_locklessable && rc == -EUSERS) {
536 /* This is a tolerable error: turn this lock into a lockless lock. */
539 osc_object_set_contended(cl2osc(slice->cls_obj));
540 LASSERT(slice->cls_ops != oscl->ols_lockless_ops);
542 /* Change this lock to an ldlm-lock-less lock. */
543 osc_lock_to_lockless(env, oscl, 1);
544 oscl->ols_state = OLS_GRANTED;
546 } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
547 LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
548 mdc_lock_lvb_update(env, cl2osc(slice->cls_obj),
549 NULL, &oscl->ols_lvb);
550 /* Hide the error. */
554 if (oscl->ols_owner != NULL)
555 cl_sync_io_note(env, oscl->ols_owner, rc);
556 cl_env_percpu_put(env);
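/*
 * Build an ost_lvb from the mdt_body of a reply: the DOM file size and block
 * count come from mbo_dom_size/mbo_dom_blocks, the timestamps from the
 * regular mbo_*time fields.
 */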
561 int mdc_fill_lvb(struct ptlrpc_request *req, struct ost_lvb *lvb)
563 struct mdt_body *body;
565 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
569 lvb->lvb_mtime = body->mbo_mtime;
570 lvb->lvb_atime = body->mbo_atime;
571 lvb->lvb_ctime = body->mbo_ctime;
572 lvb->lvb_blocks = body->mbo_dom_blocks;
573 lvb->lvb_size = body->mbo_dom_size;
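/*
 * Finish an intent enqueue: for an ELDLM_LOCK_ABORTED reply the server status
 * and the LVB are taken from the intent reply, for ELDLM_OK the LVB is stored
 * in the granted lock; then the upcall is invoked and the enqueue reference
 * on the lock handle is dropped.
 */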
578 int mdc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
579 void *cookie, struct lustre_handle *lockh,
580 enum ldlm_mode mode, __u64 *flags, int errcode)
582 struct osc_lock *ols = cookie;
583 struct ldlm_lock *lock;
588 /* The request was created before the ldlm_cli_enqueue() call. */
589 if (errcode == ELDLM_LOCK_ABORTED) {
590 struct ldlm_reply *rep;
592 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
593 LASSERT(rep != NULL);
595 rep->lock_policy_res2 =
596 ptlrpc_status_ntoh(rep->lock_policy_res2);
597 if (rep->lock_policy_res2)
598 errcode = rep->lock_policy_res2;
600 rc = mdc_fill_lvb(req, &ols->ols_lvb);
601 *flags |= LDLM_FL_LVB_READY;
602 } else if (errcode == ELDLM_OK) {
603 /* Callers hold references, so the lock should always be valid */
604 lock = ldlm_handle2lock(lockh);
607 rc = mdc_fill_lvb(req, &lock->l_ost_lvb);
609 *flags |= LDLM_FL_LVB_READY;
612 /* Call the update callback. */
613 rc = (*upcall)(cookie, lockh, rc < 0 ? rc : errcode);
615 /* release the reference taken in ldlm_cli_enqueue() */
616 if (errcode == ELDLM_LOCK_MATCHED)
618 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
619 ldlm_lock_decref(lockh, mode);
624 int mdc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
627 struct osc_enqueue_args *aa = args;
628 struct ldlm_lock *lock;
629 struct lustre_handle *lockh = &aa->oa_lockh;
630 enum ldlm_mode mode = aa->oa_mode;
634 LASSERT(!aa->oa_speculative);
636 /* ldlm_cli_enqueue() is holding a reference on the lock, so it must be valid. */
638 lock = ldlm_handle2lock(lockh);
639 LASSERTF(lock != NULL,
640 "lockh %#llx, req %p, aa %p - client evicted?\n",
641 lockh->cookie, req, aa);
643 /* Take an additional reference so that a blocking AST that
644 * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
645 * to arrive after the upcall has been executed by
646 * mdc_enqueue_fini(). */
647 ldlm_lock_addref(lockh, mode);
649 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
650 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
652 /* Let the CP AST grant the lock first. */
653 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
655 /* Complete the lock-obtaining procedure. */
656 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
657 aa->oa_mode, aa->oa_flags, NULL, 0,
659 /* Complete mdc stuff. */
660 rc = mdc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
663 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
665 ldlm_lock_decref(lockh, mode);
670 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a lock
671 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
672 * other synchronous requests; however, keeping some locks and trying to obtain
673 * others may take a considerable amount of time in case of OST failure, and
674 * when other sync requests do not get the released lock from a client, the client
675 * is excluded from the cluster -- such scenarios make life difficult, so
676 * release locks just after they are obtained. */
677 int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
678 struct ldlm_res_id *res_id, __u64 *flags,
679 union ldlm_policy_data *policy,
680 struct ost_lvb *lvb, int kms_valid,
681 osc_enqueue_upcall_f upcall, void *cookie,
682 struct ldlm_enqueue_info *einfo, int async)
684 struct obd_device *obd = exp->exp_obd;
685 struct lustre_handle lockh = { 0 };
686 struct ptlrpc_request *req = NULL;
687 struct ldlm_intent *lit;
689 bool glimpse = *flags & LDLM_FL_HAS_INTENT;
690 __u64 match_flags = *flags;
691 struct list_head cancels = LIST_HEAD_INIT(cancels);
696 mode = einfo->ei_mode;
697 if (einfo->ei_mode == LCK_PR)
701 match_flags |= LDLM_FL_BLOCK_GRANTED;
702 /* DOM locking uses LDLM_FL_KMS_IGNORE to mark locks which have no valid
703 * LVB information, e.g. canceled locks or locks of a just-pruned object;
704 * such locks should be skipped.
706 mode = ldlm_lock_match_with_skip(obd->obd_namespace, match_flags,
707 LDLM_FL_KMS_IGNORE, res_id,
708 einfo->ei_type, policy, mode,
711 struct ldlm_lock *matched;
713 if (*flags & LDLM_FL_TEST_LOCK)
716 matched = ldlm_handle2lock(&lockh);
717 /* this shouldn't happen, but the check is kept to make the
718 * related test fail if the problem occurs
720 if (unlikely(ldlm_is_kms_ignore(matched))) {
721 LDLM_ERROR(matched, "matched lock has KMS ignore flag");
725 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
726 ldlm_set_kms_ignore(matched);
728 if (mdc_set_dom_lock_data(env, matched, einfo->ei_cbdata)) {
729 *flags |= LDLM_FL_LVB_READY;
731 /* We already have a lock, and it's referenced. */
732 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
734 ldlm_lock_decref(&lockh, mode);
735 LDLM_LOCK_PUT(matched);
739 ldlm_lock_decref(&lockh, mode);
740 LDLM_LOCK_PUT(matched);
743 if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
746 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT);
750 /* For a WRITE lock, cancel other locks on the resource early, if any */
751 if (einfo->ei_mode & LCK_PW)
752 count = mdc_resource_get_unused_res(exp, res_id, &cancels,
758 rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
760 ptlrpc_request_free(req);
764 /* pack the intent */
765 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
766 lit->opc = glimpse ? IT_GLIMPSE : IT_BRW;
768 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
769 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
770 ptlrpc_request_set_replen(req);
772 /* users of mdc_enqueue() can pass this flag for ldlm_lock_match() */
773 *flags &= ~LDLM_FL_BLOCK_GRANTED;
774 /* All MDC IO locks are intents */
775 *flags |= LDLM_FL_HAS_INTENT;
776 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, NULL,
777 0, LVB_T_NONE, &lockh, async);
780 struct osc_enqueue_args *aa;
782 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
783 aa = ptlrpc_req_async_args(req);
785 aa->oa_mode = einfo->ei_mode;
786 aa->oa_type = einfo->ei_type;
787 lustre_handle_copy(&aa->oa_lockh, &lockh);
788 aa->oa_upcall = upcall;
789 aa->oa_cookie = cookie;
790 aa->oa_speculative = false;
791 aa->oa_flags = flags;
794 req->rq_interpret_reply = mdc_enqueue_interpret;
795 ptlrpcd_add_req(req);
797 ptlrpc_req_finished(req);
802 rc = mdc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
804 ptlrpc_req_finished(req);
809 * Implementation of cl_lock_operations::clo_enqueue() method for the MDC
810 * layer. This initiates the ldlm enqueue:
812 * - cancels conflicting locks early (osc_lock_enqueue_wait());
814 * - calls mdc_enqueue_send() to do the actual enqueue.
816 * mdc_enqueue_send() is supplied with an upcall function that is executed
817 * when the lock is received, either after a locally cached ldlm lock is
818 * matched or after a reply from the server is received.
820 * This function does not wait for the network communication to complete.
822 static int mdc_lock_enqueue(const struct lu_env *env,
823 const struct cl_lock_slice *slice,
824 struct cl_io *unused, struct cl_sync_io *anchor)
826 struct osc_thread_info *info = osc_env_info(env);
827 struct osc_io *oio = osc_env_io(env);
828 struct osc_object *osc = cl2osc(slice->cls_obj);
829 struct osc_lock *oscl = cl2osc_lock(slice);
830 struct cl_lock *lock = slice->cls_lock;
831 struct ldlm_res_id *resname = &info->oti_resname;
832 union ldlm_policy_data *policy = &info->oti_policy;
833 osc_enqueue_upcall_f upcall = mdc_lock_upcall;
834 void *cookie = (void *)oscl;
840 LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
841 "lock = %p, ols = %p\n", lock, oscl);
843 if (oscl->ols_state == OLS_GRANTED)
846 /* Lockahead is not supported on MDT yet */
847 if (oscl->ols_flags & LDLM_FL_NO_EXPANSION) {
848 result = -EOPNOTSUPP;
852 if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
853 GOTO(enqueue_base, 0);
855 if (oscl->ols_glimpse) {
856 LASSERT(equi(oscl->ols_speculative, anchor == NULL));
858 GOTO(enqueue_base, 0);
861 result = osc_lock_enqueue_wait(env, osc, oscl);
865 /* we can grant a lockless lock right after all conflicting locks are canceled */
867 if (osc_lock_is_lockless(oscl)) {
868 oscl->ols_state = OLS_GRANTED;
869 oio->oi_lockless = 1;
874 oscl->ols_state = OLS_ENQUEUED;
875 if (anchor != NULL) {
876 atomic_inc(&anchor->csi_sync_nr);
877 oscl->ols_owner = anchor;
881 * The DLM lock's AST data must be the osc_object;
882 * the DLM enqueue callback is set to mdc_lock_upcall(), with the osc_lock as its cookie.
885 fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
886 mdc_lock_build_policy(env, policy);
887 LASSERT(!oscl->ols_speculative);
888 result = mdc_enqueue_send(env, osc_export(osc), resname,
889 &oscl->ols_flags, policy,
890 &oscl->ols_lvb, osc->oo_oinfo->loi_kms_valid,
891 upcall, cookie, &oscl->ols_einfo, async);
893 if (osc_lock_is_lockless(oscl)) {
894 oio->oi_lockless = 1;
896 LASSERT(oscl->ols_state == OLS_GRANTED);
897 LASSERT(oscl->ols_hold);
898 LASSERT(oscl->ols_dlmlock != NULL);
903 oscl->ols_state = OLS_CANCELLED;
904 osc_lock_wake_waiters(env, osc, oscl);
907 cl_sync_io_note(env, anchor, result);
912 static const struct cl_lock_operations mdc_lock_lockless_ops = {
913 .clo_fini = osc_lock_fini,
914 .clo_enqueue = mdc_lock_enqueue,
915 .clo_cancel = mdc_lock_lockless_cancel,
916 .clo_print = osc_lock_print
919 static const struct cl_lock_operations mdc_lock_ops = {
920 .clo_fini = osc_lock_fini,
921 .clo_enqueue = mdc_lock_enqueue,
922 .clo_cancel = osc_lock_cancel,
923 .clo_print = osc_lock_print,
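/*
 * Implementation of cl_object_operations::coo_lock_init() for a Data-on-MDT
 * object: allocate an osc_lock slice, build the IBITS enqueue info, attach
 * mdc_lock_ops to the slice and record mdc_lock_lockless_ops for the case
 * where the lock is later turned into a lockless one.
 */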
926 int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
927 struct cl_lock *lock, const struct cl_io *io)
929 struct osc_lock *ols;
930 __u32 enqflags = lock->cll_descr.cld_enq_flags;
931 __u64 flags = osc_enq2ldlm_flags(enqflags);
935 /* Ignore AGL for Data-on-MDT; stat returns the size data anyway */
936 if ((enqflags & CEF_SPECULATIVE) != 0)
939 OBD_SLAB_ALLOC_PTR_GFP(ols, osc_lock_kmem, GFP_NOFS);
940 if (unlikely(ols == NULL))
943 ols->ols_state = OLS_NEW;
944 spin_lock_init(&ols->ols_lock);
945 INIT_LIST_HEAD(&ols->ols_waiting_list);
946 INIT_LIST_HEAD(&ols->ols_wait_entry);
947 INIT_LIST_HEAD(&ols->ols_nextlock_oscobj);
948 ols->ols_lockless_ops = &mdc_lock_lockless_ops;
950 ols->ols_flags = flags;
951 ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
953 if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
954 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
955 ols->ols_glimpse = 1;
957 mdc_lock_build_einfo(env, lock, cl2osc(obj), &ols->ols_einfo);
959 cl_lock_slice_add(lock, &ols->ols_cl, obj, &mdc_lock_ops);
961 if (!(enqflags & CEF_MUST))
962 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
963 if (ols->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
964 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
966 if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
967 osc_lock_set_writer(env, io, obj, ols);
969 LDLM_DEBUG_NOLOCK("lock %p, mdc lock %p, flags %llx\n",
970 lock, ols, ols->ols_flags);
977 * An implementation of the cl_io_operations methods specific to the MDC layer.
980 static int mdc_async_upcall(void *a, int rc)
982 struct osc_async_cbargs *args = a;
985 complete(&args->opc_sync);
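/*
 * Look up the DOM lock that protects the given page index and return its
 * remote handle for use in an RPC. If no lock is found, the resource is
 * dumped for debugging, since every covered page is expected to be under
 * a DOM lock.
 */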
989 static int mdc_get_lock_handle(const struct lu_env *env, struct osc_object *osc,
990 pgoff_t index, struct lustre_handle *lh)
992 struct ldlm_lock *lock;
994 /* find DOM lock protecting object */
995 lock = mdc_dlmlock_at_pgoff(env, osc, index,
996 OSC_DAP_FL_TEST_LOCK |
997 OSC_DAP_FL_CANCELING);
999 struct ldlm_resource *res;
1000 struct ldlm_res_id *resname;
1002 resname = &osc_env_info(env)->oti_resname;
1003 fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
1004 res = ldlm_resource_get(osc_export(osc)->exp_obd->obd_namespace,
1005 NULL, resname, LDLM_IBITS, 0);
1006 ldlm_resource_dump(D_ERROR, res);
1007 libcfs_debug_dumpstack(NULL);
1010 *lh = lock->l_remote_handle;
1011 LDLM_LOCK_PUT(lock);
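/*
 * CIT_SETATTR start for Data-on-MDT: only truncates need work here. Cached
 * pages beyond the new size are truncated first, the cl_object attributes
 * are updated under the attribute lock, and then a punch request is sent via
 * osc_punch_send(), carrying either the DOM lock handle or OBD_FL_SRVLOCK
 * for the lockless case.
 */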
1016 static int mdc_io_setattr_start(const struct lu_env *env,
1017 const struct cl_io_slice *slice)
1019 struct cl_io *io = slice->cis_io;
1020 struct osc_io *oio = cl2osc_io(env, slice);
1021 struct cl_object *obj = slice->cis_obj;
1022 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
1023 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1024 struct obdo *oa = &oio->oi_oa;
1025 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1026 __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
1027 unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
1028 enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
1031 /* silently ignore non-truncate setattr for Data-on-MDT object */
1032 if (cl_io_is_trunc(io)) {
1033 /* truncate cached dirty pages first */
1034 rc = osc_cache_truncate_start(env, cl2osc(obj), size,
1040 if (oio->oi_lockless == 0) {
1041 cl_object_attr_lock(obj);
1042 rc = cl_object_attr_get(env, obj, attr);
1044 struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
1045 unsigned int cl_valid = 0;
1047 if (ia_avalid & ATTR_SIZE) {
1048 attr->cat_size = size;
1049 attr->cat_kms = size;
1050 cl_valid = (CAT_SIZE | CAT_KMS);
1052 if (ia_avalid & ATTR_MTIME_SET) {
1053 attr->cat_mtime = lvb->lvb_mtime;
1054 cl_valid |= CAT_MTIME;
1056 if (ia_avalid & ATTR_ATIME_SET) {
1057 attr->cat_atime = lvb->lvb_atime;
1058 cl_valid |= CAT_ATIME;
1060 if (ia_xvalid & OP_XVALID_CTIME_SET) {
1061 attr->cat_ctime = lvb->lvb_ctime;
1062 cl_valid |= CAT_CTIME;
1064 rc = cl_object_attr_update(env, obj, attr, cl_valid);
1066 cl_object_attr_unlock(obj);
1071 if (!(ia_avalid & ATTR_SIZE))
1074 memset(oa, 0, sizeof(*oa));
1075 oa->o_oi = loi->loi_oi;
1076 oa->o_mtime = attr->cat_mtime;
1077 oa->o_atime = attr->cat_atime;
1078 oa->o_ctime = attr->cat_ctime;
1081 oa->o_blocks = OBD_OBJECT_EOF;
1082 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
1083 OBD_MD_FLCTIME | OBD_MD_FLMTIME | OBD_MD_FLSIZE |
1085 if (oio->oi_lockless) {
1086 oa->o_flags = OBD_FL_SRVLOCK;
1087 oa->o_valid |= OBD_MD_FLFLAGS;
1089 rc = mdc_get_lock_handle(env, cl2osc(obj), CL_PAGE_EOF,
1092 oa->o_valid |= OBD_MD_FLHANDLE;
1095 init_completion(&cbargs->opc_sync);
1097 rc = osc_punch_send(osc_export(cl2osc(obj)), oa,
1098 mdc_async_upcall, cbargs);
1099 cbargs->opc_rpc_sent = rc == 0;
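/*
 * Read-ahead under a DOM lock: since one lock covers the whole object, the
 * read-ahead window is extended to CL_PAGE_EOF. The reference on the lock is
 * converted to LCK_PR for the duration and released later through
 * osc_read_ahead_release().
 */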
1103 static int mdc_io_read_ahead(const struct lu_env *env,
1104 const struct cl_io_slice *ios,
1105 pgoff_t start, struct cl_read_ahead *ra)
1107 struct osc_object *osc = cl2osc(ios->cis_obj);
1108 struct ldlm_lock *dlmlock;
1112 dlmlock = mdc_dlmlock_at_pgoff(env, osc, start, 0);
1113 if (dlmlock == NULL)
1116 if (dlmlock->l_req_mode != LCK_PR) {
1117 struct lustre_handle lockh;
1119 ldlm_lock2handle(dlmlock, &lockh);
1120 ldlm_lock_addref(&lockh, LCK_PR);
1121 ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
1124 ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc;
1125 ra->cra_end = CL_PAGE_EOF;
1126 ra->cra_release = osc_read_ahead_release;
1127 ra->cra_cbdata = dlmlock;
1132 int mdc_io_fsync_start(const struct lu_env *env,
1133 const struct cl_io_slice *slice)
1135 struct cl_io *io = slice->cis_io;
1136 struct cl_fsync_io *fio = &io->u.ci_fsync;
1137 struct cl_object *obj = slice->cis_obj;
1138 struct osc_object *osc = cl2osc(obj);
1143 /* an MDC lock always covers the whole object, so sync the whole
1144 * possible range regardless of the supplied start/end values.
1146 result = osc_cache_writeback_range(env, osc, 0, CL_PAGE_EOF, 0,
1147 fio->fi_mode == CL_FSYNC_DISCARD);
1149 fio->fi_nr_written += result;
1152 if (fio->fi_mode == CL_FSYNC_ALL) {
1155 rc = osc_cache_wait_range(env, osc, 0, CL_PAGE_EOF);
1158 /* Use the OSC sync code because it is asynchronous.
1159 * It is to be added into MDC to avoid using
1160 * OST_SYNC at both the MDC and the MDT.
1162 rc = osc_fsync_ost(env, osc, fio);
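/*
 * CIT_DATA_VERSION is implemented with a plain MDS_GETATTR request that asks
 * for OBD_MD_FLDATAVERSION (optionally with OBD_FL_SRVLOCK/OBD_FL_FLUSH when
 * a flush is requested); mdc_data_version_interpret() below copies the data
 * version and layout generation from the mdt_body into the osc_io obdo.
 */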
1170 struct mdc_data_version_args {
1171 struct osc_io *dva_oio;
1175 mdc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
1178 struct mdc_data_version_args *dva = args;
1179 struct osc_io *oio = dva->dva_oio;
1180 const struct mdt_body *body;
1186 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1188 GOTO(out, rc = -EPROTO);
1190 /* Prepare OBDO from mdt_body for CLIO */
1191 oio->oi_oa.o_valid = body->mbo_valid;
1192 oio->oi_oa.o_flags = body->mbo_flags;
1193 oio->oi_oa.o_data_version = body->mbo_version;
1194 oio->oi_oa.o_layout_version = body->mbo_layout_gen;
1197 oio->oi_cbarg.opc_rc = rc;
1198 complete(&oio->oi_cbarg.opc_sync);
1202 static int mdc_io_data_version_start(const struct lu_env *env,
1203 const struct cl_io_slice *slice)
1205 struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
1206 struct osc_io *oio = cl2osc_io(env, slice);
1207 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1208 struct osc_object *obj = cl2osc(slice->cis_obj);
1209 struct obd_export *exp = osc_export(obj);
1210 struct ptlrpc_request *req;
1211 struct mdt_body *body;
1212 struct mdc_data_version_args *dva;
1217 memset(&oio->oi_oa, 0, sizeof(oio->oi_oa));
1218 oio->oi_oa.o_oi.oi_fid = *lu_object_fid(osc2lu(obj));
1219 oio->oi_oa.o_valid = OBD_MD_FLID;
1221 init_completion(&cbargs->opc_sync);
1223 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
1227 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
1229 ptlrpc_request_free(req);
1233 body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
1234 body->mbo_fid1 = *lu_object_fid(osc2lu(obj));
1235 body->mbo_valid = OBD_MD_FLID;
1236 /* Indicate that data version is needed */
1237 body->mbo_valid |= OBD_MD_FLDATAVERSION;
1238 body->mbo_flags = 0;
1240 if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
1241 body->mbo_valid |= OBD_MD_FLFLAGS;
1242 body->mbo_flags |= OBD_FL_SRVLOCK;
1243 if (dv->dv_flags & LL_DV_WR_FLUSH)
1244 body->mbo_flags |= OBD_FL_FLUSH;
1247 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
1248 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
1249 ptlrpc_request_set_replen(req);
1251 req->rq_interpret_reply = mdc_data_version_interpret;
1252 CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
1253 dva = ptlrpc_req_async_args(req);
1256 ptlrpcd_add_req(req);
1261 static void mdc_io_data_version_end(const struct lu_env *env,
1262 const struct cl_io_slice *slice)
1264 struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
1265 struct osc_io *oio = cl2osc_io(env, slice);
1266 struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
1269 wait_for_completion(&cbargs->opc_sync);
1271 if (cbargs->opc_rc != 0) {
1272 slice->cis_io->ci_result = cbargs->opc_rc;
1274 slice->cis_io->ci_result = 0;
1275 if (!(oio->oi_oa.o_valid &
1276 (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
1277 slice->cis_io->ci_result = -ENOTSUPP;
1279 if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION)
1280 dv->dv_layout_version = oio->oi_oa.o_layout_version;
1281 if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)
1282 dv->dv_data_version = oio->oi_oa.o_data_version;
1288 static struct cl_io_operations mdc_io_ops = {
1291 .cio_iter_init = osc_io_iter_init,
1292 .cio_iter_fini = osc_io_iter_fini,
1293 .cio_start = osc_io_read_start,
1296 .cio_iter_init = osc_io_write_iter_init,
1297 .cio_iter_fini = osc_io_write_iter_fini,
1298 .cio_start = osc_io_write_start,
1299 .cio_end = osc_io_end,
1302 .cio_iter_init = osc_io_iter_init,
1303 .cio_iter_fini = osc_io_iter_fini,
1304 .cio_start = mdc_io_setattr_start,
1305 .cio_end = osc_io_setattr_end,
1307 [CIT_DATA_VERSION] = {
1308 .cio_start = mdc_io_data_version_start,
1309 .cio_end = mdc_io_data_version_end,
1312 .cio_iter_init = osc_io_iter_init,
1313 .cio_iter_fini = osc_io_iter_fini,
1314 .cio_start = osc_io_fault_start,
1315 .cio_end = osc_io_end,
1318 .cio_start = mdc_io_fsync_start,
1319 .cio_end = osc_io_fsync_end,
1322 .cio_read_ahead = mdc_io_read_ahead,
1323 .cio_submit = osc_io_submit,
1324 .cio_commit_async = osc_io_commit_async,
1327 int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
1330 struct osc_io *oio = osc_env_io(env);
1332 CL_IO_SLICE_CLEAN(oio, oi_cl);
1333 cl_io_slice_add(io, &oio->oi_cl, obj, &mdc_io_ops);
1337 static void mdc_build_res_name(struct osc_object *osc,
1338 struct ldlm_res_id *resname)
1340 fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
1344 * Implementation of struct cl_req_operations::cro_attr_set() for the MDC
1345 * layer. MDC is responsible for the struct obdo::o_id and struct obdo::o_seq fields.
1348 static void mdc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
1349 struct cl_req_attr *attr)
1351 u64 flags = attr->cra_flags;
1353 /* Copy the object FID into the request obdo */
1354 attr->cra_oa->o_oi.oi_fid = *lu_object_fid(&obj->co_lu);
1356 if (flags & OBD_MD_FLGROUP)
1357 attr->cra_oa->o_valid |= OBD_MD_FLGROUP;
1359 if (flags & OBD_MD_FLID)
1360 attr->cra_oa->o_valid |= OBD_MD_FLID;
1362 if (flags & OBD_MD_FLHANDLE) {
1363 struct osc_page *opg;
1365 opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
1366 if (!opg->ops_srvlock) {
1369 rc = mdc_get_lock_handle(env, cl2osc(obj),
1371 &attr->cra_oa->o_handle);
1373 CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
1374 "uncovered page!\n");
1377 attr->cra_oa->o_valid |= OBD_MD_FLHANDLE;
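/*
 * Return cached attributes for a DOM object. If an error code has been
 * stashed in lvb_blocks (OST_LVB_IS_ERR()), return that error; otherwise
 * fall through to the generic osc_attr_get().
 */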
1383 static int mdc_attr_get(const struct lu_env *env, struct cl_object *obj,
1384 struct cl_attr *attr)
1386 struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
1388 if (OST_LVB_IS_ERR(oinfo->loi_lvb.lvb_blocks))
1389 return OST_LVB_GET_ERR(oinfo->loi_lvb.lvb_blocks);
1391 return osc_attr_get(env, obj, attr);
1394 static int mdc_object_ast_clear(struct ldlm_lock *lock, void *data)
1398 if (lock->l_ast_data == data)
1399 lock->l_ast_data = NULL;
1400 ldlm_set_kms_ignore(lock);
1401 RETURN(LDLM_ITER_CONTINUE);
1404 int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
1406 struct osc_object *osc = cl2osc(obj);
1407 struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
1409 /* DLM locks don't hold a reference on the osc_object, so we have to
1410 * clear l_ast_data before the object is destroyed. */
1411 osc_build_res_name(osc, resname);
1412 ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
1413 mdc_object_ast_clear, osc);
1417 static const struct cl_object_operations mdc_ops = {
1418 .coo_page_init = osc_page_init,
1419 .coo_lock_init = mdc_lock_init,
1420 .coo_io_init = mdc_io_init,
1421 .coo_attr_get = mdc_attr_get,
1422 .coo_attr_update = osc_attr_update,
1423 .coo_glimpse = osc_object_glimpse,
1424 .coo_req_attr_set = mdc_req_attr_set,
1425 .coo_prune = mdc_object_prune,
1428 static const struct osc_object_operations mdc_object_ops = {
1429 .oto_build_res_name = mdc_build_res_name,
1430 .oto_dlmlock_at_pgoff = mdc_dlmlock_at_pgoff,
1433 static int mdc_object_init(const struct lu_env *env, struct lu_object *obj,
1434 const struct lu_object_conf *conf)
1436 struct osc_object *osc = lu2osc(obj);
1438 if (osc->oo_initialized)
1441 osc->oo_initialized = true;
1443 return osc_object_init(env, obj, conf);
1446 static void mdc_object_free(const struct lu_env *env, struct lu_object *obj)
1448 osc_object_free(env, obj);
1451 static const struct lu_object_operations mdc_lu_obj_ops = {
1452 .loo_object_init = mdc_object_init,
1453 .loo_object_delete = NULL,
1454 .loo_object_release = NULL,
1455 .loo_object_free = mdc_object_free,
1456 .loo_object_print = osc_object_print,
1457 .loo_object_invariant = NULL
1460 struct lu_object *mdc_object_alloc(const struct lu_env *env,
1461 const struct lu_object_header *unused,
1462 struct lu_device *dev)
1464 struct osc_object *osc;
1465 struct lu_object *obj;
1467 OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, GFP_NOFS);
1470 lu_object_init(obj, NULL, dev);
1471 osc->oo_cl.co_ops = &mdc_ops;
1472 obj->lo_ops = &mdc_lu_obj_ops;
1473 osc->oo_obj_ops = &mdc_object_ops;
1474 osc->oo_initialized = false;
1481 static int mdc_process_config(const struct lu_env *env, struct lu_device *d,
1482 struct lustre_cfg *cfg)
1484 size_t count = class_modify_config(cfg, PARAM_MDC,
1485 &d->ld_obd->obd_kset.kobj);
1486 return count > 0 ? 0 : count;
1489 const struct lu_device_operations mdc_lu_ops = {
1490 .ldo_object_alloc = mdc_object_alloc,
1491 .ldo_process_config = mdc_process_config,
1492 .ldo_recovery_complete = NULL,
1495 static struct lu_device *mdc_device_alloc(const struct lu_env *env,
1496 struct lu_device_type *t,
1497 struct lustre_cfg *cfg)
1499 struct lu_device *d;
1500 struct osc_device *od;
1501 struct obd_device *obd;
1506 RETURN(ERR_PTR(-ENOMEM));
1508 cl_device_init(&od->od_cl, t);
1510 d->ld_ops = &mdc_lu_ops;
1513 obd = class_name2obd(lustre_cfg_string(cfg, 0));
1515 RETURN(ERR_PTR(-ENODEV));
1517 rc = mdc_setup(obd, cfg);
1519 osc_device_free(env, d);
1520 RETURN(ERR_PTR(rc));
1522 od->od_exp = obd->obd_self_export;
1526 static const struct lu_device_type_operations mdc_device_type_ops = {
1527 .ldto_device_alloc = mdc_device_alloc,
1528 .ldto_device_free = osc_device_free,
1529 .ldto_device_init = osc_device_init,
1530 .ldto_device_fini = osc_device_fini
1533 struct lu_device_type mdc_device_type = {
1534 .ldt_tags = LU_DEVICE_CL,
1535 .ldt_name = LUSTRE_MDC_NAME,
1536 .ldt_ops = &mdc_device_type_ops,
1537 .ldt_ctx_tags = LCT_CL_THREAD