 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2017 Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_device, cl_req for MDC layer.
 *
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */
#define DEBUG_SUBSYSTEM S_MDC

#include <obd_class.h>
#include <lustre_osc.h>

#include "mdc_internal.h"

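/* Build the IBITS policy for a Data-on-MDT lock: MDC IO locks request
 * only the DOM inodebit, which protects file data stored on the MDT. */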
static void mdc_lock_build_policy(const struct lu_env *env,
                                  union ldlm_policy_data *policy)
{
        memset(policy, 0, sizeof(*policy));
        policy->l_inodebits.bits = MDS_INODELOCK_DOM;
}

int mdc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
        return osc_ldlm_glimpse_ast(dlmlock, data);
}

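/* Fill ldlm_enqueue_info for a DoM lock enqueue: IBITS lock type, the
 * MDC blocking/completion/glimpse callbacks and the osc_object that
 * will be stored in the DLM lock's l_ast_data. */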
static void mdc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *lock,
                                 struct osc_object *osc,
                                 struct ldlm_enqueue_info *einfo)
{
        einfo->ei_type = LDLM_IBITS;
        einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
        einfo->ei_cb_bl = mdc_ldlm_blocking_ast;
        einfo->ei_cb_cp = ldlm_completion_ast;
        einfo->ei_cb_gl = mdc_ldlm_glimpse_ast;
        einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
}

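/* Attach @data (an osc_object) to the lock's l_ast_data unless the lock
 * is already owned by another object. Returns 1 if l_ast_data points at
 * @data after the call, 0 otherwise. */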
static int mdc_set_dom_lock_data(struct ldlm_lock *lock, void *data)
{
        int set = 0;

        LASSERT(lock != NULL);
        lock_res_and_lock(lock);
        if (lock->l_ast_data == NULL)
                lock->l_ast_data = data;
        if (lock->l_ast_data == data)
                set = 1;
        unlock_res_and_lock(lock);

        return set;
}

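/* ldlm_lock_match() wrapper for DoM locks: a matched lock is bound to
 * the given object via mdc_set_dom_lock_data(); a lock that already
 * belongs to a different object is released and not reused. */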
int mdc_dom_lock_match(struct obd_export *exp, struct ldlm_res_id *res_id,
                       enum ldlm_type type, union ldlm_policy_data *policy,
                       enum ldlm_mode mode, __u64 *flags, void *data,
                       struct lustre_handle *lockh, int unref)
{
        struct obd_device *obd = exp->exp_obd;
        __u64 lflags = *flags;
        enum ldlm_mode rc;

        rc = ldlm_lock_match(obd->obd_namespace, lflags,
                             res_id, type, policy, mode, lockh, unref);
        if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
                return rc;

        if (data != NULL) {
                struct ldlm_lock *lock = ldlm_handle2lock(lockh);

                LASSERT(lock != NULL);
                if (!mdc_set_dom_lock_data(lock, data)) {
                        ldlm_lock_decref(lockh, rc);
                        rc = 0;
                }
                LDLM_LOCK_PUT(lock);
        }
        return rc;
}

/**
 * Finds an existing lock covering a page with given index.
 * Copy of osc_obj_dlmlock_at_pgoff() but for DoM IBITS lock.
 */
struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
                                       struct osc_object *obj, pgoff_t index,
                                       enum osc_dap_flags dap_flags)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct ldlm_res_id *resname = &info->oti_resname;
        union ldlm_policy_data *policy = &info->oti_policy;
        struct lustre_handle lockh;
        struct ldlm_lock *lock = NULL;
        enum ldlm_mode mode;
        __u64 flags;

        ENTRY;

        fid_build_reg_res_name(lu_object_fid(osc2lu(obj)), resname);
        mdc_lock_build_policy(env, policy);

        flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
        if (dap_flags & OSC_DAP_FL_TEST_LOCK)
                flags |= LDLM_FL_TEST_LOCK;

again:
        /* Next, search for already existing extent locks that will cover us */
        /* If we're trying to read, we also search for an existing PW lock. The
         * VFS and page cache already protect us locally, so lots of readers/
         * writers can share a single PW lock. */
        mode = mdc_dom_lock_match(osc_export(obj), resname, LDLM_IBITS, policy,
                                  LCK_PR | LCK_PW, &flags, obj, &lockh,
                                  dap_flags & OSC_DAP_FL_CANCELING);
        if (mode != 0) {
                lock = ldlm_handle2lock(&lockh);
                /* RACE: the lock is cancelled so let's try again */
                if (unlikely(lock == NULL))
                        goto again;
        }
        RETURN(lock);
}

/**
 * Check if page @page is covered by an additional lock or discard it.
 */
static int mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
                                    struct osc_page *ops, void *cbdata)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct osc_object *osc = cbdata;
        pgoff_t index;

        index = osc_index(ops);
        if (index >= info->oti_fn_index) {
                struct ldlm_lock *tmp;
                struct cl_page *page = ops->ops_cl.cpl_page;

                /* refresh non-overlapped index */
                tmp = mdc_dlmlock_at_pgoff(env, osc, index,
                                           OSC_DAP_FL_TEST_LOCK);
                if (tmp != NULL) {
                        info->oti_fn_index = CL_PAGE_EOF;
                        LDLM_LOCK_PUT(tmp);
                } else if (cl_page_own(env, io, page) == 0) {
                        /* discard the page */
                        cl_page_discard(env, io, page);
                        cl_page_disown(env, io, page);
                } else {
                        LASSERT(page->cp_state == CPS_FREEING);
                }
        }

        info->oti_next_index = index + 1;
        return CLP_GANG_OKAY;
}

/**
 * Discard pages protected by the given lock. This function traverses radix
 * tree to find all covering pages and discard them. If a page is being covered
 * by other locks, it should remain in cache.
 *
 * If error happens on any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
 */
static int mdc_lock_discard_pages(const struct lu_env *env,
                                  struct osc_object *osc,
                                  pgoff_t start, pgoff_t end,
                                  bool discard)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct cl_io *io = &info->oti_io;
        osc_page_gang_cbt cb;
        int res;
        int result;

        ENTRY;

        io->ci_obj = cl_object_top(osc2cl(osc));
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                GOTO(out, result);

        cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
        info->oti_fn_index = info->oti_next_index = start;
        do {
                res = osc_page_gang_lookup(env, io, osc, info->oti_next_index,
                                           end, cb, (void *)osc);
                if (info->oti_next_index > end)
                        break;

                if (res == CLP_GANG_RESCHED)
                        cond_resched();
        } while (res != CLP_GANG_OKAY);
out:
        cl_io_fini(env, io);
        RETURN(result);
}

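/* Flush a page range covered by a lock being cancelled: for CLM_WRITE
 * locks dirty pages are written back (or discarded) first, then pages
 * not protected by another lock are dropped from the cache. */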
static int mdc_lock_flush(const struct lu_env *env, struct osc_object *obj,
                          pgoff_t start, pgoff_t end, enum cl_lock_mode mode,
                          bool discard)
{
        int result = 0;
        int rc;

        if (mode == CLM_WRITE) {
                result = osc_cache_writeback_range(env, obj, start, end, 1,
                                                   discard);
                CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
                       obj, start, end, result,
                       discard ? "discarded" : "written back");
                if (result > 0)
                        result = 0;
        }

        rc = mdc_lock_discard_pages(env, obj, start, end, discard);
        if (result == 0 && rc < 0)
                result = rc;
        return result;
}

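/* Cancel a lockless lock: there is no DLM lock to release, so only the
 * pages under the cl_lock extent are flushed and waiters are woken. */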
void mdc_lock_lockless_cancel(const struct lu_env *env,
                              const struct cl_lock_slice *slice)
{
        struct osc_lock *ols = cl2osc_lock(slice);
        struct osc_object *osc = cl2osc(slice->cls_obj);
        struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
        int rc;

        LASSERT(ols->ols_dlmlock == NULL);
        rc = mdc_lock_flush(env, osc, descr->cld_start, descr->cld_end,
                            descr->cld_mode, 0);
        if (rc != 0)
                CERROR("Pages for lockless lock %p were not purged(%d)\n",
                       ols, rc);

        osc_lock_wake_waiters(env, osc, ols);
}

/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
                                 struct ldlm_lock *dlmlock,
                                 void *data, int flag)
{
        struct cl_object *obj = NULL;
        int result = 0;
        bool discard;
        enum cl_lock_mode mode = CLM_READ;

        ENTRY;

        LASSERT(flag == LDLM_CB_CANCELING);
        LASSERT(dlmlock != NULL);

        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
                dlmlock->l_ast_data = NULL;
                unlock_res_and_lock(dlmlock);
                RETURN(0);
        }

        discard = ldlm_is_discard_data(dlmlock);
        if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
                mode = CLM_WRITE;

        if (dlmlock->l_ast_data != NULL) {
                obj = osc2cl(dlmlock->l_ast_data);
                dlmlock->l_ast_data = NULL;
                cl_object_get(obj);
        }
        unlock_res_and_lock(dlmlock);

        /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
         * the object has been destroyed. */
        if (obj != NULL) {
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;

                /* Destroy pages covered by the extent of the DLM lock */
                result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
                                        CL_PAGE_EOF, mode, discard);

                /* Losing a lock, set KMS to 0.
                 * NB: assumed that DOM lock covers whole data on MDT. */
                lock_res_and_lock(dlmlock);
                cl_object_attr_lock(obj);
                attr->cat_kms = 0;
                cl_object_attr_update(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
                unlock_res_and_lock(dlmlock);
                cl_object_put(env, obj);
        }
        RETURN(result);
}

int mdc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                          struct ldlm_lock_desc *new, void *data, int flag)
{
        int rc = 0;

        ENTRY;

        switch (flag) {
        case LDLM_CB_BLOCKING: {
                struct lustre_handle lockh;

                ldlm_lock2handle(dlmlock, &lockh);
                rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                if (rc == -ENODATA)
                        rc = 0;
                break;
        }
        case LDLM_CB_CANCELING: {
                struct lu_env *env;
                __u16 refcheck;

                /*
                 * This can be called in the context of outer IO, e.g.,
                 *
                 *    osc_enqueue_base()->...
                 *      ->ldlm_prep_elc_req()->...
                 *        ->ldlm_cancel_callback()->...
                 *          ->osc_ldlm_blocking_ast()
                 *
                 * new environment has to be created to not corrupt outer
                 * context.
                 */
                env = cl_env_get(&refcheck);
                if (IS_ERR(env)) {
                        rc = PTR_ERR(env);
                        break;
                }

                rc = mdc_dlm_blocking_ast0(env, dlmlock, data, flag);
                cl_env_put(env, &refcheck);
                break;
        }
        default:
                LBUG();
        }
        RETURN(rc);
}

/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server.
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
static void mdc_lock_lvb_update(const struct lu_env *env,
                                struct osc_object *osc,
                                struct ldlm_lock *dlmlock,
                                struct ost_lvb *lvb)
{
        struct cl_object *obj = osc2cl(osc);
        struct lov_oinfo *oinfo = osc->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME |
                         CAT_SIZE;

        ENTRY;

        if (lvb == NULL) {
                LASSERT(dlmlock != NULL);
                lvb = &dlmlock->l_ost_lvb;
        }
        cl_lvb2attr(attr, lvb);

        cl_object_attr_lock(obj);
        if (dlmlock != NULL) {
                __u64 size;

                check_res_locked(dlmlock->l_resource);
                size = lvb->lvb_size;

                if (size >= oinfo->loi_kms) {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu,"
                                   " kms=%llu", lvb->lvb_size, size);
                        valid |= CAT_KMS;
                        attr->cat_kms = size;
                } else {
                        LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu,"
                                   " leaving kms=%llu, end=%llu",
                                   lvb->lvb_size, oinfo->loi_kms,
                                   dlmlock->l_policy_data.l_extent.end);
                }
        }
        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
        EXIT;
}

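/* Attach a granted DLM lock to its osc_lock: take the references that
 * osc_lock_detach()/osc_lock_cancel() will drop, extend the cl_lock to
 * the whole file (a DOM lock covers all MDT data) and update the
 * attributes from the LVB for a freshly enqueued lock. */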
static void mdc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
                             struct lustre_handle *lockh, bool lvb_update)
{
        struct ldlm_lock *dlmlock;

        ENTRY;

        dlmlock = ldlm_handle2lock_long(lockh, 0);
        LASSERT(dlmlock != NULL);

        /* lock reference taken by ldlm_handle2lock_long() is
         * owned by osc_lock and released in osc_lock_detach() */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
        oscl->ols_has_ref = 1;

        LASSERT(oscl->ols_dlmlock == NULL);
        oscl->ols_dlmlock = dlmlock;

        /* This may be a matched lock for glimpse request, do not hold
         * lock reference in that case. */
        if (!oscl->ols_glimpse) {
                /* hold a refc for non glimpse lock which will
                 * be released in osc_lock_cancel() */
                lustre_handle_copy(&oscl->ols_handle, lockh);
                ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
                oscl->ols_hold = 1;
        }

        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

                /* extend the lock extent, otherwise it will have problem when
                 * we decide whether to grant a lockless lock. */
                descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, 0);
                descr->cld_end = CL_PAGE_EOF;

                /* no lvb update for matched lock */
                if (lvb_update) {
                        LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                        mdc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
                                            dlmlock, NULL);
                }
        }
        unlock_res_and_lock(dlmlock);

        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
        EXIT;
}

/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int mdc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                           int errcode)
{
        struct osc_lock *oscl = cookie;
        struct cl_lock_slice *slice = &oscl->ols_cl;
        struct lu_env *env;
        int rc;

        ENTRY;

        env = cl_env_percpu_get();
        /* should never happen, similar to osc_ldlm_blocking_ast(). */
        LASSERT(!IS_ERR(env));

        rc = ldlm_error2errno(errcode);
        if (oscl->ols_state == OLS_ENQUEUED) {
                oscl->ols_state = OLS_UPCALL_RECEIVED;
        } else if (oscl->ols_state == OLS_CANCELLED) {
                rc = -EIO;
        } else {
                CERROR("Impossible state: %d\n", oscl->ols_state);
                LBUG();
        }

        CDEBUG(D_INODE, "rc %d, err %d\n", rc, errcode);
        if (rc == 0)
                mdc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

        /* Error handling, some errors are tolerable. */
        if (oscl->ols_locklessable && rc == -EUSERS) {
                /* This is a tolerable error, turn this lock into
                 * lockless lock. */
                osc_object_set_contended(cl2osc(slice->cls_obj));
                LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

                /* Change this lock to ldlmlock-less lock. */
                osc_lock_to_lockless(env, oscl, 1);
                oscl->ols_state = OLS_GRANTED;
                rc = 0;
        } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
                LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                mdc_lock_lvb_update(env, cl2osc(slice->cls_obj),
                                    NULL, &oscl->ols_lvb);
                /* Hide the error. */
                rc = 0;
        }

        if (oscl->ols_owner != NULL)
                cl_sync_io_note(env, oscl->ols_owner, rc);
        cl_env_percpu_put(env);

        RETURN(rc);
}

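/* Fill an ost_lvb from the mdt_body of an intent reply, so the common
 * OSC-style code can consume size and time attributes from it. */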
int mdc_fill_lvb(struct ptlrpc_request *req, struct ost_lvb *lvb)
{
        struct mdt_body *body;

        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        if (!body)
                RETURN(-EPROTO);

        lvb->lvb_mtime = body->mbo_mtime;
        lvb->lvb_atime = body->mbo_atime;
        lvb->lvb_ctime = body->mbo_ctime;
        lvb->lvb_blocks = body->mbo_blocks;
        lvb->lvb_size = body->mbo_size;
        RETURN(0);
}

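/* Finish an enqueue: take the LVB from the reply for both granted and
 * aborted (glimpse) cases, run the upcall with the final status and
 * drop the enqueue reference on a granted lock. */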
int mdc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
                     void *cookie, struct lustre_handle *lockh,
                     enum ldlm_mode mode, __u64 *flags, int errcode)
{
        struct osc_lock *ols = cookie;
        struct ldlm_lock *lock;
        int rc = 0;

        ENTRY;

        /* The request was created before ldlm_cli_enqueue call. */
        if (errcode == ELDLM_LOCK_ABORTED) {
                struct ldlm_reply *rep;

                rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
                LASSERT(rep != NULL);

                rep->lock_policy_res2 =
                        ptlrpc_status_ntoh(rep->lock_policy_res2);
                if (rep->lock_policy_res2)
                        errcode = rep->lock_policy_res2;

                rc = mdc_fill_lvb(req, &ols->ols_lvb);
                *flags |= LDLM_FL_LVB_READY;
        } else if (errcode == ELDLM_OK) {
                /* Callers have references, should be valid always */
                lock = ldlm_handle2lock(lockh);
                LASSERT(lock);

                rc = mdc_fill_lvb(req, &lock->l_ost_lvb);
                LDLM_LOCK_PUT(lock);
                *flags |= LDLM_FL_LVB_READY;
        }

        /* Call the update callback. */
        rc = (*upcall)(cookie, lockh, rc < 0 ? rc : errcode);

        /* release the reference taken in ldlm_cli_enqueue() */
        if (errcode == ELDLM_LOCK_MATCHED)
                errcode = ELDLM_OK;
        if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
                ldlm_lock_decref(lockh, mode);

        RETURN(rc);
}

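/* Interpret callback of an asynchronous enqueue, the DoM counterpart of
 * osc_enqueue_interpret(): completes the ldlm and mdc parts of the
 * enqueue under an extra lock reference, so that a blocking AST for a
 * failed lock arrives only after the upcall has run. */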
int mdc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                          struct osc_enqueue_args *aa, int rc)
{
        struct ldlm_lock *lock;
        struct lustre_handle *lockh = &aa->oa_lockh;
        enum ldlm_mode mode = aa->oa_mode;

        ENTRY;

        LASSERT(!aa->oa_speculative);

        /* ldlm_cli_enqueue is holding a reference on the lock, so it must
         * be valid. */
        lock = ldlm_handle2lock(lockh);
        LASSERTF(lock != NULL,
                 "lockh %#llx, req %p, aa %p - client evicted?\n",
                 lockh->cookie, req, aa);

        /* Take an additional reference so that a blocking AST that
         * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
         * to arrive after an upcall has been executed by
         * osc_enqueue_fini(). */
        ldlm_lock_addref(lockh, mode);

        /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);

        /* Let CP AST to grant the lock first. */
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);

        /* Complete obtaining the lock procedure. */
        rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
                                   aa->oa_mode, aa->oa_flags, NULL, 0,
                                   lockh, rc);
        /* Complete mdc stuff. */
        rc = mdc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
                              aa->oa_flags, rc);

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);

        ldlm_lock_decref(lockh, mode);
        LDLM_LOCK_PUT(lock);
        RETURN(rc);
}

/* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
 * other synchronous requests, however keeping some locks and trying to obtain
 * others may take a considerable amount of time in a case of ost failure; and
 * when other sync requests do not get released lock from a client, the client
 * is excluded from the cluster -- such scenarios make the life difficult, so
 * release locks just after they are obtained. */
int mdc_enqueue_send(struct obd_export *exp, struct ldlm_res_id *res_id,
                     __u64 *flags, union ldlm_policy_data *policy,
                     struct ost_lvb *lvb, int kms_valid,
                     osc_enqueue_upcall_f upcall, void *cookie,
                     struct ldlm_enqueue_info *einfo, int async)
{
        struct obd_device *obd = exp->exp_obd;
        struct lustre_handle lockh = { 0 };
        struct ptlrpc_request *req = NULL;
        struct ldlm_intent *lit;
        enum ldlm_mode mode;
        bool glimpse = *flags & LDLM_FL_HAS_INTENT;
        __u64 match_flags = *flags;
        int rc;

        ENTRY;

        mode = einfo->ei_mode;
        if (einfo->ei_mode == LCK_PR)
                mode |= LCK_PW;

        if (!glimpse)
                match_flags |= LDLM_FL_BLOCK_GRANTED;
        mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
                               einfo->ei_type, policy, mode, &lockh, 0);
        if (mode) {
                struct ldlm_lock *matched;

                if (*flags & LDLM_FL_TEST_LOCK)
                        RETURN(ELDLM_OK);

                matched = ldlm_handle2lock(&lockh);
                if (mdc_set_dom_lock_data(matched, einfo->ei_cbdata)) {
                        *flags |= LDLM_FL_LVB_READY;
                        /* We already have a lock, and it's referenced. */
                        (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);

                        ldlm_lock_decref(&lockh, mode);
                        LDLM_LOCK_PUT(matched);
                        RETURN(ELDLM_OK);
                } else {
                        ldlm_lock_decref(&lockh, mode);
                        LDLM_LOCK_PUT(matched);
                }
        }

        if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
                RETURN(-ENOLCK);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* pack the intent */
        lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        lit->opc = glimpse ? IT_GLIMPSE : IT_BRW;

        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
        req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
        ptlrpc_request_set_replen(req);

        /* users of mdc_enqueue() can pass this flag for ldlm_lock_match() */
        *flags &= ~LDLM_FL_BLOCK_GRANTED;
        /* All MDC IO locks are intents */
        *flags |= LDLM_FL_HAS_INTENT;
        rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, NULL,
                              0, LVB_T_NONE, &lockh, async);
        if (async) {
                if (!rc) {
                        struct osc_enqueue_args *aa;

                        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
                        aa = ptlrpc_req_async_args(req);
                        aa->oa_exp = exp;
                        aa->oa_mode = einfo->ei_mode;
                        aa->oa_type = einfo->ei_type;
                        lustre_handle_copy(&aa->oa_lockh, &lockh);
                        aa->oa_upcall = upcall;
                        aa->oa_cookie = cookie;
                        aa->oa_speculative = false;
                        aa->oa_flags = flags;
                        aa->oa_lvb = lvb;

                        req->rq_interpret_reply =
                                (ptlrpc_interpterer_t)mdc_enqueue_interpret;
                        ptlrpcd_add_req(req);
                } else {
                        ptlrpc_req_finished(req);
                }
                RETURN(rc);
        }

        rc = mdc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
                              flags, rc);
        ptlrpc_req_finished(req);
        RETURN(rc);
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() method for MDC
 * layer. This initiates ldlm enqueue:
 *
 * - cancels conflicting locks early (osc_lock_enqueue_wait());
 *
 * - calls mdc_enqueue_send() to do actual enqueue.
 *
 * mdc_enqueue_send() is supplied with an upcall function that is executed
 * when lock is received either after a local cached ldlm lock is matched, or
 * when a reply from the server is received.
 *
 * This function does not wait for the network communication to complete.
 */
static int mdc_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *unused, struct cl_sync_io *anchor)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(slice->cls_obj);
        struct osc_lock *oscl = cl2osc_lock(slice);
        struct cl_lock *lock = slice->cls_lock;
        struct ldlm_res_id *resname = &info->oti_resname;
        union ldlm_policy_data *policy = &info->oti_policy;
        osc_enqueue_upcall_f upcall = mdc_lock_upcall;
        void *cookie = (void *)oscl;
        bool async = false;
        int result;

        ENTRY;

        LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
                 "lock = %p, ols = %p\n", lock, oscl);

        if (oscl->ols_state == OLS_GRANTED)
                RETURN(0);

        /* Lockahead is not supported on MDT yet */
        if (oscl->ols_flags & LDLM_FL_NO_EXPANSION) {
                result = -EOPNOTSUPP;
                RETURN(result);
        }

        if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
                GOTO(enqueue_base, 0);

        if (oscl->ols_glimpse) {
                LASSERT(equi(oscl->ols_speculative, anchor == NULL));
                async = true;
                GOTO(enqueue_base, 0);
        }

        result = osc_lock_enqueue_wait(env, osc, oscl);
        if (result < 0)
                GOTO(out, result);

        /* we can grant lockless lock right after all conflicting locks
         * are canceled. */
        if (osc_lock_is_lockless(oscl)) {
                oscl->ols_state = OLS_GRANTED;
                oio->oi_lockless = 1;
                RETURN(0);
        }

enqueue_base:
        oscl->ols_state = OLS_ENQUEUED;
        if (anchor != NULL) {
                atomic_inc(&anchor->csi_sync_nr);
                oscl->ols_owner = anchor;
        }

        /**
         * DLM lock's ast data must be osc_object;
         * DLM's enqueue callback set to osc_lock_upcall() with cookie as
         * osc_lock.
         */
        fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
        mdc_lock_build_policy(env, policy);
        LASSERT(!oscl->ols_speculative);
        result = mdc_enqueue_send(osc_export(osc), resname, &oscl->ols_flags,
                                  policy, &oscl->ols_lvb,
                                  osc->oo_oinfo->loi_kms_valid,
                                  upcall, cookie, &oscl->ols_einfo, async);
        if (result == 0) {
                if (osc_lock_is_lockless(oscl)) {
                        oio->oi_lockless = 1;
                } else if (!async) {
                        LASSERT(oscl->ols_state == OLS_GRANTED);
                        LASSERT(oscl->ols_hold);
                        LASSERT(oscl->ols_dlmlock != NULL);
                }
        }
out:
        if (result < 0) {
                oscl->ols_state = OLS_CANCELLED;
                osc_lock_wake_waiters(env, osc, oscl);

                if (anchor != NULL)
                        cl_sync_io_note(env, anchor, result);
        }
        RETURN(result);
}

static const struct cl_lock_operations mdc_lock_lockless_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = mdc_lock_enqueue,
        .clo_cancel = mdc_lock_lockless_cancel,
        .clo_print = osc_lock_print
};

static const struct cl_lock_operations mdc_lock_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = mdc_lock_enqueue,
        .clo_cancel = osc_lock_cancel,
        .clo_print = osc_lock_print,
};

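/**
 * Implementation of cl_object_operations::coo_lock_init() for MDC layer:
 * allocates and initializes an osc_lock slice for an IBITS/DOM enqueue
 * and decides early whether the lock can be served locklessly.
 */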
int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io)
{
        struct osc_lock *ols;
        __u32 enqflags = lock->cll_descr.cld_enq_flags;
        __u64 flags = osc_enq2ldlm_flags(enqflags);

        ENTRY;

        /* Ignore AGL for Data-on-MDT, stat returns size data */
        if ((enqflags & CEF_SPECULATIVE) != 0)
                RETURN(0);

        OBD_SLAB_ALLOC_PTR_GFP(ols, osc_lock_kmem, GFP_NOFS);
        if (unlikely(ols == NULL))
                RETURN(-ENOMEM);

        ols->ols_state = OLS_NEW;
        spin_lock_init(&ols->ols_lock);
        INIT_LIST_HEAD(&ols->ols_waiting_list);
        INIT_LIST_HEAD(&ols->ols_wait_entry);
        INIT_LIST_HEAD(&ols->ols_nextlock_oscobj);
        ols->ols_lockless_ops = &mdc_lock_lockless_ops;

        ols->ols_flags = flags;
        ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);

        if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
                ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
                ols->ols_glimpse = 1;
        }
        mdc_lock_build_einfo(env, lock, cl2osc(obj), &ols->ols_einfo);

        cl_lock_slice_add(lock, &ols->ols_cl, obj, &mdc_lock_ops);

        if (!(enqflags & CEF_MUST))
                osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
        if (ols->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
                ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;

        if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
                osc_lock_set_writer(env, io, obj, ols);

        LDLM_DEBUG_NOLOCK("lock %p, mdc lock %p, flags %llx\n",
                          lock, ols, ols->ols_flags);

        RETURN(0);
}

/**
 * An implementation of cl_io_operations specific methods for MDC layer.
 */
static int mdc_async_upcall(void *a, int rc)
{
        struct osc_async_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}

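/* Start a setattr IO: truncate cached pages first, update cl_object
 * attributes, and for size changes send a punch RPC to the MDT through
 * osc_punch_send(). */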
static int mdc_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
        unsigned int ia_valid = io->u.ci_setattr.sa_valid;
        int rc;

        /* silently ignore non-truncate setattr for Data-on-MDT object */
        if (cl_io_is_trunc(io)) {
                /* truncate cache dirty pages first */
                rc = osc_cache_truncate_start(env, cl2osc(obj), size,
                                              &oio->oi_trunc);
                if (rc < 0)
                        return rc;
        }

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                rc = cl_object_attr_get(env, obj, attr);
                if (rc == 0) {
                        struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
                        unsigned int cl_valid = 0;

                        if (ia_valid & ATTR_SIZE) {
                                attr->cat_size = attr->cat_kms = size;
                                cl_valid = (CAT_SIZE | CAT_KMS);
                        }
                        if (ia_valid & ATTR_MTIME_SET) {
                                attr->cat_mtime = lvb->lvb_mtime;
                                cl_valid |= CAT_MTIME;
                        }
                        if (ia_valid & ATTR_ATIME_SET) {
                                attr->cat_atime = lvb->lvb_atime;
                                cl_valid |= CAT_ATIME;
                        }
                        if (ia_valid & ATTR_CTIME_SET) {
                                attr->cat_ctime = lvb->lvb_ctime;
                                cl_valid |= CAT_CTIME;
                        }
                        rc = cl_object_attr_update(env, obj, attr, cl_valid);
                }
                cl_object_attr_unlock(obj);
                if (rc < 0)
                        return rc;
        }

        if (!(ia_valid & ATTR_SIZE))
                return 0;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_mtime = attr->cat_mtime;
        oa->o_atime = attr->cat_atime;
        oa->o_ctime = attr->cat_ctime;

        oa->o_size = size;
        oa->o_blocks = OBD_OBJECT_EOF;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
                      OBD_MD_FLCTIME | OBD_MD_FLMTIME | OBD_MD_FLSIZE |
                      OBD_MD_FLBLOCKS;
        if (oio->oi_lockless) {
                oa->o_flags = OBD_FL_SRVLOCK;
                oa->o_valid |= OBD_MD_FLFLAGS;
        }

        init_completion(&cbargs->opc_sync);

        rc = osc_punch_send(osc_export(cl2osc(obj)), oa,
                            mdc_async_upcall, cbargs);
        cbargs->opc_rpc_sent = rc == 0;
        return rc;
}

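/* cl_io_operations::cio_read_ahead() for MDC: allow read-ahead up to the
 * end of the DOM lock coverage; the reference on a PW lock is converted
 * to PR so read-ahead does not pin the stronger mode. */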
static int mdc_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct ldlm_lock *dlmlock;

        ENTRY;

        dlmlock = mdc_dlmlock_at_pgoff(env, osc, start, 0);
        if (dlmlock == NULL)
                RETURN(-ENODATA);

        if (dlmlock->l_req_mode != LCK_PR) {
                struct lustre_handle lockh;

                ldlm_lock2handle(dlmlock, &lockh);
                ldlm_lock_addref(&lockh, LCK_PR);
                ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
        }

        ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc;
        ra->cra_end = CL_PAGE_EOF;
        ra->cra_release = osc_read_ahead_release;
        ra->cra_cbdata = dlmlock;

        RETURN(0);
}

static struct cl_io_operations mdc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_read_start,
                },
                [CIT_WRITE] = {
                        .cio_iter_init = osc_io_write_iter_init,
                        .cio_iter_fini = osc_io_write_iter_fini,
                        .cio_start     = osc_io_write_start,
                        .cio_end       = osc_io_end,
                },
                [CIT_SETATTR] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = mdc_io_setattr_start,
                        .cio_end       = osc_io_setattr_end,
                },
                /* no support for data version so far */
                [CIT_DATA_VERSION] = {
                        .cio_start = NULL,
                        .cio_end   = NULL,
                },
                [CIT_FAULT] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_fault_start,
                        .cio_end       = osc_io_end,
                },
                [CIT_FSYNC] = {
                        .cio_start = osc_io_fsync_start,
                        .cio_end   = osc_io_fsync_end,
                },
        },
        .cio_read_ahead   = mdc_io_read_ahead,
        .cio_submit       = osc_io_submit,
        .cio_commit_async = osc_io_commit_async,
};

int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &mdc_io_ops);
        return 0;
}

static void mdc_build_res_name(struct osc_object *osc,
                               struct ldlm_res_id *resname)
{
        fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for MDC
 * layer. MDC is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void mdc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                             struct cl_req_attr *attr)
{
        u64 flags = attr->cra_flags;

        /* Copy object FID to cl_attr */
        attr->cra_oa->o_oi.oi_fid = *lu_object_fid(&obj->co_lu);

        if (flags & OBD_MD_FLGROUP)
                attr->cra_oa->o_valid |= OBD_MD_FLGROUP;

        if (flags & OBD_MD_FLID)
                attr->cra_oa->o_valid |= OBD_MD_FLID;

        if (flags & OBD_MD_FLHANDLE) {
                struct ldlm_lock *lock; /* _some_ lock protecting @apage */
                struct osc_page *opg;

                opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
                lock = mdc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
                                OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
                if (lock == NULL && !opg->ops_srvlock) {
                        struct ldlm_resource *res;
                        struct ldlm_res_id *resname;

                        CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
                                      "uncovered page!\n");

                        resname = &osc_env_info(env)->oti_resname;
                        mdc_build_res_name(cl2osc(obj), resname);
                        res = ldlm_resource_get(
                                osc_export(cl2osc(obj))->exp_obd->obd_namespace,
                                NULL, resname, LDLM_IBITS, 0);
                        ldlm_resource_dump(D_ERROR, res);

                        libcfs_debug_dumpstack(NULL);
                        LBUG();
                }

                /* check for lockless io. */
                if (lock != NULL) {
                        attr->cra_oa->o_handle = lock->l_remote_handle;
                        attr->cra_oa->o_valid |= OBD_MD_FLHANDLE;
                        LDLM_LOCK_PUT(lock);
                }
        }
}

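/* cl_object_operations::coo_attr_get() for MDC: fail early if a glimpse
 * stored an error in the cached LVB, otherwise use common osc_attr_get(). */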
static int mdc_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;

        if (OST_LVB_IS_ERR(oinfo->loi_lvb.lvb_blocks))
                return OST_LVB_GET_ERR(oinfo->loi_lvb.lvb_blocks);

        return osc_attr_get(env, obj, attr);
}

static const struct cl_object_operations mdc_ops = {
        .coo_page_init = osc_page_init,
        .coo_lock_init = mdc_lock_init,
        .coo_io_init = mdc_io_init,
        .coo_attr_get = mdc_attr_get,
        .coo_attr_update = osc_attr_update,
        .coo_glimpse = osc_object_glimpse,
        .coo_req_attr_set = mdc_req_attr_set,
        .coo_prune = osc_object_prune,
};

static const struct osc_object_operations mdc_object_ops = {
        .oto_build_res_name = mdc_build_res_name,
        .oto_dlmlock_at_pgoff = mdc_dlmlock_at_pgoff,
};

static int mdc_object_init(const struct lu_env *env, struct lu_object *obj,
                           const struct lu_object_conf *conf)
{
        struct osc_object *osc = lu2osc(obj);

        if (osc->oo_initialized)
                return 0;

        osc->oo_initialized = true;

        return osc_object_init(env, obj, conf);
}

static void mdc_object_free(const struct lu_env *env, struct lu_object *obj)
{
        osc_object_free(env, obj);
}

static const struct lu_object_operations mdc_lu_obj_ops = {
        .loo_object_init = mdc_object_init,
        .loo_object_delete = NULL,
        .loo_object_release = NULL,
        .loo_object_free = mdc_object_free,
        .loo_object_print = osc_object_print,
        .loo_object_invariant = NULL
};

struct lu_object *mdc_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *unused,
                                   struct lu_device *dev)
{
        struct osc_object *osc;
        struct lu_object *obj;

        OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, GFP_NOFS);
        if (osc != NULL) {
                obj = osc2lu(osc);
                lu_object_init(obj, NULL, dev);
                osc->oo_cl.co_ops = &mdc_ops;
                obj->lo_ops = &mdc_lu_obj_ops;
                osc->oo_obj_ops = &mdc_object_ops;
                osc->oo_initialized = false;
        } else {
                obj = NULL;
        }
        return obj;
}

static int mdc_cl_process_config(const struct lu_env *env,
                                 struct lu_device *d, struct lustre_cfg *cfg)
{
        return mdc_process_config(d->ld_obd, 0, cfg);
}

const struct lu_device_operations mdc_lu_ops = {
        .ldo_object_alloc = mdc_object_alloc,
        .ldo_process_config = mdc_cl_process_config,
        .ldo_recovery_complete = NULL,
};

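/* Allocate the MDC cl_device: reuses struct osc_device and binds it to
 * the existing MDC obd via mdc_setup() instead of a separate setup. */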
static struct lu_device *mdc_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *t,
                                          struct lustre_cfg *cfg)
{
        struct lu_device *d;
        struct osc_device *od;
        struct obd_device *obd;
        int rc;

        OBD_ALLOC_PTR(od);
        if (od == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        cl_device_init(&od->od_cl, t);
        d = osc2lu_dev(od);
        d->ld_ops = &mdc_lu_ops;

        /* Setup MDC OBD */
        obd = class_name2obd(lustre_cfg_string(cfg, 0));
        if (obd == NULL)
                RETURN(ERR_PTR(-ENODEV));

        rc = mdc_setup(obd, cfg);
        if (rc < 0) {
                osc_device_free(env, d);
                RETURN(ERR_PTR(rc));
        }
        od->od_exp = obd->obd_self_export;
        RETURN(d);
}

static const struct lu_device_type_operations mdc_device_type_ops = {
        .ldto_device_alloc = mdc_device_alloc,
        .ldto_device_free = osc_device_free,
        .ldto_device_init = osc_device_init,
        .ldto_device_fini = osc_device_fini
};

struct lu_device_type mdc_device_type = {
        .ldt_tags = LU_DEVICE_CL,
        .ldt_name = LUSTRE_MDC_NAME,
        .ldt_ops = &mdc_device_type_ops,
        .ldt_ctx_tags = LCT_CL_THREAD
};