1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5 * Use is subject to license terms.
7 * Copyright (c) 2010, 2017, Intel Corporation.
11 * This file is part of Lustre, http://www.lustre.org/
13 * Lustre Metadata Target (mdt) request handler
15 * Author: Peter Braam <braam@clusterfs.com>
16 * Author: Andreas Dilger <adilger@clusterfs.com>
17 * Author: Phil Schwan <phil@clusterfs.com>
18 * Author: Mike Shaver <shaver@clusterfs.com>
19 * Author: Nikita Danilov <nikita@clusterfs.com>
20 * Author: Huang Hua <huanghua@clusterfs.com>
21 * Author: Yury Umanets <umka@clusterfs.com>
24 #define DEBUG_SUBSYSTEM S_MDS
26 #include <linux/module.h>
27 #include <linux/pagemap.h>
29 #include <dt_object.h>
30 #include <lustre_acl.h>
31 #include <lustre_export.h>
32 #include <uapi/linux/lustre/lustre_ioctl.h>
33 #include <lustre_lfsck.h>
34 #include <lustre_log.h>
35 #include <lustre_nodemap.h>
36 #include <lustre_mds.h>
37 #include <uapi/linux/lustre/lustre_param.h>
38 #include <lustre_quota.h>
39 #include <lustre_swab.h>
40 #include <lustre_lmv.h>
42 #include <obd_support.h>
43 #include <lustre_barrier.h>
44 #include <obd_cksum.h>
45 #include <llog_swab.h>
46 #include <lustre_crypto.h>
48 #include "mdt_internal.h"
50 #if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
51 static int mdt_max_mod_rpcs_per_client_set(const char *val,
52 cfs_kernel_param_arg_t *kp)
57 rc = kstrtouint(val, 0, &num);
61 if (num < 1 || num > OBD_MAX_RIF_MAX)
64 CWARN("max_mod_rpcs_per_client is deprecated, set mdt.*.max_mod_rpcs_in_flight parameter instead\n");
66 max_mod_rpcs_per_client = num;
69 static const struct kernel_param_ops
70 param_ops_max_mod_rpcs_per_client = {
71 .set = mdt_max_mod_rpcs_per_client_set,
72 .get = param_get_uint,
75 #define param_check_max_mod_rpcs_per_client(name, p) \
76 __param_check(name, p, unsigned int)
78 module_param_cb(max_mod_rpcs_per_client,
79 &param_ops_max_mod_rpcs_per_client,
80 &max_mod_rpcs_per_client, 0644);
82 MODULE_PARM_DESC(max_mod_rpcs_per_client,
83 "maximum number of modify RPCs in flight allowed per client (Deprecated)");
86 static struct mdt_device *mdt_dev(struct lu_device *d);
88 static const struct lu_object_operations mdt_obj_ops;
90 /* Slab for MDT object allocation */
91 static struct kmem_cache *mdt_object_kmem;
93 /* For HSM restore handles */
94 struct kmem_cache *mdt_hsm_cdt_kmem;
96 /* For HSM request handles */
97 struct kmem_cache *mdt_hsm_car_kmem;
99 static struct lu_kmem_descr mdt_caches[] = {
101 .ckd_cache = &mdt_object_kmem,
102 .ckd_name = "mdt_obj",
103 .ckd_size = sizeof(struct mdt_object)
106 .ckd_cache = &mdt_hsm_cdt_kmem,
107 .ckd_name = "mdt_cdt_restore_handle",
108 .ckd_size = sizeof(struct cdt_restore_handle)
111 .ckd_cache = &mdt_hsm_car_kmem,
112 .ckd_name = "mdt_cdt_agent_req",
113 .ckd_size = sizeof(struct cdt_agent_req)
120 __u64 mdt_get_disposition(struct ldlm_reply *rep, __u64 op_flag)
124 return rep->lock_policy_res1 & op_flag;
127 void mdt_clear_disposition(struct mdt_thread_info *info,
128 struct ldlm_reply *rep, __u64 op_flag)
131 info->mti_opdata &= ~op_flag;
132 tgt_opdata_clear(info->mti_env, op_flag);
135 rep->lock_policy_res1 &= ~op_flag;
138 void mdt_set_disposition(struct mdt_thread_info *info,
139 struct ldlm_reply *rep, __u64 op_flag)
142 info->mti_opdata |= op_flag;
143 tgt_opdata_set(info->mti_env, op_flag);
146 rep->lock_policy_res1 |= op_flag;
149 /* assert lock is unlocked before reuse */
150 static inline void mdt_lock_handle_assert(struct mdt_lock_handle *lh)
152 LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
153 LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
154 LASSERT(!lustre_handle_is_used(&lh->mlh_rreg_lh));
157 void mdt_lock_reg_init(struct mdt_lock_handle *lh, enum ldlm_mode lm)
159 mdt_lock_handle_assert(lh);
160 lh->mlh_pdo_hash = 0;
161 lh->mlh_reg_mode = lm;
162 lh->mlh_rreg_mode = lm;
163 lh->mlh_type = MDT_REG_LOCK;
166 void mdt_lh_reg_init(struct mdt_lock_handle *lh, struct ldlm_lock *lock)
168 mdt_lock_reg_init(lh, lock->l_req_mode);
169 if (lock->l_req_mode == LCK_GROUP)
170 lh->mlh_gid = lock->l_policy_data.l_inodebits.li_gid;
173 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, enum ldlm_mode lock_mode,
174 const struct lu_name *lname)
176 mdt_lock_handle_assert(lh);
177 lh->mlh_reg_mode = lock_mode;
178 lh->mlh_pdo_mode = LCK_MODE_MIN;
179 lh->mlh_rreg_mode = lock_mode;
180 lh->mlh_type = MDT_PDO_LOCK;
182 if (lu_name_is_valid(lname)) {
183 lh->mlh_pdo_hash = ll_full_name_hash(NULL, lname->ln_name,
185 /* XXX Workaround for LU-2856
187 * Zero is a valid return value of full_name_hash, but several
188 * users of mlh_pdo_hash assume a non-zero hash value. We
189 * therefore map zero onto an arbitrary, but consistent
190 * value (1) to avoid problems further down the road.
192 if (unlikely(lh->mlh_pdo_hash == 0))
193 lh->mlh_pdo_hash = 1;
195 lh->mlh_pdo_hash = 0;
199 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
200 struct mdt_lock_handle *lh)
207 * Any dir access needs a couple of locks:
209 * 1) on the part of the dir we are going to look up or modify;
211 * 2) on the whole dir to protect it from concurrent splitting and/or to
212 * flush the client's cache for readdir().
214 * So, for a given mode and object this routine decides what lock mode
215 * to use for lock #2:
217 * 1) if the caller is going to look up in the dir, we only need to
218 * protect the dir from being split - LCK_CR
220 * 2) if the caller is going to modify the dir, we need to protect the
221 * dir from being split and to flush the cache - LCK_CW
223 * 3) if the caller is going to modify the dir and that dir seems ready
224 * for splitting, we need to protect it from any type of access
225 * (lookup/modify/split) - LCK_EX --bzzz
228 LASSERT(lh->mlh_reg_mode != LCK_MODE_MIN);
229 LASSERT(lh->mlh_pdo_mode == LCK_MODE_MIN);
232 * Ask the underlying layer its opinion about the preferable PDO lock
233 * mode, with the access type passed as the regular lock mode:
235 * - LCK_MODE_MIN means that the lower layer does not want to specify a lock
238 * - LCK_NL means that no PDO lock should be taken. This is used in some
239 * cases, e.g. for non-splittable directories there is no need for PDO locks
242 mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
245 if (mode != LCK_MODE_MIN) {
246 lh->mlh_pdo_mode = mode;
249 * The lower layer does not want to specify a locking mode. We do it
250 * ourselves. No special protection is needed, just flush the
251 * client's cache on modification and allow concurrent
254 switch (lh->mlh_reg_mode) {
256 lh->mlh_pdo_mode = LCK_EX;
259 lh->mlh_pdo_mode = LCK_CR;
262 lh->mlh_pdo_mode = LCK_CW;
265 CERROR("Not expected lock type (0x%x)\n",
266 (int)lh->mlh_reg_mode);
271 LASSERT(lh->mlh_pdo_mode != LCK_MODE_MIN);
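/*
 * Net effect of the fallback switch above, assuming the elided case
 * labels are LCK_EX/LCK_PR/LCK_PW as in the stock handler:
 *
 *	mlh_reg_mode	mlh_pdo_mode
 *	LCK_EX		LCK_EX	(exclusive, e.g. split in progress)
 *	LCK_PR		LCK_CR	(lookup: only guard against split)
 *	LCK_PW		LCK_CW	(modify: also flush the client cache)
 */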
276 * Check whether \a o is a directory stripe object.
278 * \param[in] info thread environment
279 * \param[in] o MDT object
281 * \retval 1 \a o is a directory stripe.
282 * \retval 0 \a o isn't a directory stripe.
283 * \retval < 0 error code
285 static int mdt_is_dir_stripe(struct mdt_thread_info *info,
286 struct mdt_object *o)
288 struct md_attr *ma = &info->mti_attr;
289 struct lmv_mds_md_v1 *lmv;
292 rc = mdt_stripe_get(info, o, ma, XATTR_NAME_LMV);
296 if (!(ma->ma_valid & MA_LMV))
299 lmv = &ma->ma_lmv->lmv_md_v1;
301 if (!lmv_is_sane2(lmv))
304 if (le32_to_cpu(lmv->lmv_magic) == LMV_MAGIC_STRIPE)
310 static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset,
313 struct mdt_device *mdt = info->mti_mdt;
314 struct lu_name *lname = &info->mti_name;
315 const char *start = fileset;
316 char *filename = info->mti_filename;
317 struct mdt_object *obj;
320 LASSERT(!info->mti_cross_ref);
323 * We may want to allow this to mount a completely separate
324 * fileset from the MDT in the future, but keeping it to
325 * ROOT/ only for now avoids potential security issues.
327 *fid = mdt->mdt_md_root_fid;
329 while (rc == 0 && start != NULL && *start != '\0') {
330 const char *s1 = start;
336 while (*s2 != '/' && *s2 != '\0')
344 lname->ln_namelen = s2 - s1;
345 if (lname->ln_namelen > NAME_MAX) {
350 /* reject .. as a path component */
351 if (lname->ln_namelen == 2 && strncmp(s1, "..", 2) == 0) {
356 strncpy(filename, s1, lname->ln_namelen);
357 filename[lname->ln_namelen] = '\0';
358 lname->ln_name = filename;
360 obj = mdt_object_find(info->mti_env, mdt, fid);
365 /* lookup only gets the FID of this object by name */
367 rc = mdo_lookup(info->mti_env, mdt_object_child(obj), lname,
368 fid, &info->mti_spec);
369 if (!rc && !S_ISDIR(lu_object_attr(&obj->mot_obj)))
371 mdt_object_put(info->mti_env, obj);
377 static int mdt_get_root(struct tgt_session_info *tsi)
379 struct mdt_thread_info *info = tsi2mdt_info(tsi);
380 struct obd_export *exp = info->mti_exp;
381 struct mdt_body *repbody;
382 struct lu_nodemap *nodemap = NULL;
383 struct mdt_device *mdt = info->mti_mdt;
384 char *fileset = NULL, *buffer = NULL;
385 char *nodemap_fileset = NULL;
390 rc = mdt_check_ucred(info);
392 GOTO(out, rc = err_serious(rc));
394 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_GET_ROOT_PACK))
395 GOTO(out, rc = err_serious(-ENOMEM));
397 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
398 if (req_capsule_get_size(info->mti_pill, &RMF_NAME, RCL_CLIENT) > 0) {
399 fileset = req_capsule_client_get(info->mti_pill, &RMF_NAME);
401 GOTO(out, rc = err_serious(-EFAULT));
404 /* refuse access if this nodemap is set to deny mounts */
405 nodemap = nodemap_get_from_exp(exp);
406 if (!IS_ERR_OR_NULL(nodemap)) {
407 if (nodemap->nmf_deny_mount)
408 GOTO(out, rc = err_serious(-EPERM));
409 nodemap_fileset = nodemap_get_fileset(nodemap);
412 if (nodemap_fileset != NULL && nodemap_fileset[0]) {
413 CDEBUG(D_INFO, "nodemap fileset is %s\n", nodemap_fileset);
415 /* consider fileset from client as a sub-fileset
418 OBD_ALLOC(buffer, PATH_MAX + 1);
420 GOTO(out, rc = err_serious(-ENOMEM));
421 if (snprintf(buffer, PATH_MAX + 1, "%s/%s",
422 nodemap_fileset, fileset) >= PATH_MAX + 1)
423 GOTO(out, rc = err_serious(-EINVAL));
426 /* enforce fileset as specified in the nodemap */
427 fileset = nodemap_fileset;
432 CDEBUG(D_INFO, "Getting fileset %s\n", fileset);
433 rc = mdt_lookup_fileset(info, fileset, &repbody->mbo_fid1);
435 GOTO(out, rc = err_serious(rc));
437 repbody->mbo_fid1 = mdt->mdt_md_root_fid;
439 exp->exp_root_fid = repbody->mbo_fid1;
440 repbody->mbo_valid |= OBD_MD_FLID;
444 mdt_thread_info_fini(info);
445 OBD_FREE(buffer, PATH_MAX+1);
447 if (!IS_ERR_OR_NULL(nodemap))
448 nodemap_putref(nodemap);
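/*
 * Fileset combination example (a sketch): with a nodemap fileset of
 * "/projA" and a client-supplied fileset of "subdir", the snprintf()
 * above builds "/projA/subdir", which mdt_lookup_fileset() then resolves
 * component by component starting from mdt_md_root_fid.
 */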
453 static int mdt_statfs(struct tgt_session_info *tsi)
455 struct ptlrpc_request *req = tgt_ses_req(tsi);
456 struct mdt_thread_info *info = tsi2mdt_info(tsi);
457 struct mdt_device *mdt = info->mti_mdt;
458 struct tg_grants_data *tgd = &mdt->mdt_lut.lut_tgd;
459 struct md_device *next = mdt->mdt_child;
460 struct ptlrpc_service_part *svcpt;
461 struct obd_statfs *osfs;
462 struct mdt_body *reqbody = NULL;
463 struct mdt_statfs_cache *msf;
464 ktime_t kstart = ktime_get();
465 int current_blockbits;
471 svcpt = req->rq_rqbd->rqbd_svcpt;
473 /* This will trigger a watchdog timeout */
474 at_est = obd_at_get(mdt->mdt_lu_dev.ld_obd, &svcpt->scp_at_estimate);
475 CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
476 (MDT_SERVICE_WATCHDOG_FACTOR * at_est) + 1);
478 rc = mdt_check_ucred(info);
480 GOTO(out, rc = err_serious(rc));
482 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
483 GOTO(out, rc = err_serious(-ENOMEM));
485 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
487 GOTO(out, rc = -EPROTO);
489 if (mdt_is_sum_statfs_client(req->rq_export) &&
490 lustre_packed_msg_size(req->rq_reqmsg) ==
491 req_capsule_fmt_size(req->rq_reqmsg->lm_magic,
492 &RQF_MDS_STATFS_NEW, RCL_CLIENT)) {
493 req_capsule_extend(info->mti_pill, &RQF_MDS_STATFS_NEW);
494 reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
497 if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS)
498 msf = &mdt->mdt_sum_osfs;
500 msf = &mdt->mdt_osfs;
502 if (msf->msf_age + OBD_STATFS_CACHE_SECONDS <= ktime_get_seconds()) {
503 /** statfs data is too old, get up-to-date one */
504 if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS)
505 rc = next->md_ops->mdo_statfs(info->mti_env, next,
508 rc = dt_statfs(info->mti_env, mdt->mdt_bottom, osfs);
511 spin_lock(&mdt->mdt_lock);
512 msf->msf_osfs = *osfs;
513 msf->msf_age = ktime_get_seconds();
514 spin_unlock(&mdt->mdt_lock);
516 /** use cached statfs data */
517 spin_lock(&mdt->mdt_lock);
518 *osfs = msf->msf_osfs;
519 spin_unlock(&mdt->mdt_lock);
522 /* tgd_blockbits is the recordsize bits set during mkfs.
523 * Once set, it does not change. However, 'zfs set'
524 * can be used to change the MDT blocksize. Instead
525 * of using the cached value of 'tgd_blockbits', always
526 * calculate the blocksize bits, which may have changed.
529 current_blockbits = fls64(osfs->os_bsize) - 1;
531 /* Account for cached pages. It's still racy and might be under-reporting
532 * if clients haven't announced their caches with brw recently
534 CDEBUG(D_SUPER | D_CACHE, "blocks cached %llu granted %llu pending %llu free %llu avail %llu\n",
535 tgd->tgd_tot_dirty, tgd->tgd_tot_granted,
536 tgd->tgd_tot_pending,
537 osfs->os_bfree << current_blockbits,
538 osfs->os_bavail << current_blockbits);
540 osfs->os_bavail -= min_t(u64, osfs->os_bavail,
541 ((tgd->tgd_tot_dirty + tgd->tgd_tot_pending +
542 osfs->os_bsize - 1) >> current_blockbits));
544 tgt_grant_sanity_check(mdt->mdt_lu_dev.ld_obd, __func__);
545 if (mdt->mdt_lut.lut_no_create)
546 osfs->os_state |= OS_STATFS_NOCREATE;
547 CDEBUG(D_CACHE, "%llu blocks: %llu free, %llu avail; "
548 "%llu objects: %llu free; state %x\n",
549 osfs->os_blocks, osfs->os_bfree, osfs->os_bavail,
550 osfs->os_files, osfs->os_ffree, osfs->os_state);
552 if (!exp_grant_param_supp(tsi->tsi_exp) &&
553 current_blockbits > COMPAT_BSIZE_SHIFT) {
554 /* clients which don't support OBD_CONNECT_GRANT_PARAM
555 * should not see a block size > page size, otherwise
556 * cl_lost_grant goes mad. Therefore, we emulate a 4KB (=2^12)
557 * block size which is the biggest block size known to work
558 * with all clients' page sizes.
560 osfs->os_blocks <<= current_blockbits - COMPAT_BSIZE_SHIFT;
561 osfs->os_bfree <<= current_blockbits - COMPAT_BSIZE_SHIFT;
562 osfs->os_bavail <<= current_blockbits - COMPAT_BSIZE_SHIFT;
563 osfs->os_bsize = 1 << COMPAT_BSIZE_SHIFT;
566 mdt_counter_incr(req, LPROC_MDT_STATFS,
567 ktime_us_delta(ktime_get(), kstart));
569 mdt_thread_info_fini(info);
573 __u32 mdt_lmm_dom_entry_check(struct lov_mds_md *lmm, int *is_dom_only)
575 struct lov_comp_md_v1 *comp_v1;
576 struct lov_mds_md *v1;
578 __u32 dom_stripesize = 0;
580 bool has_ost_stripes = false;
587 if (le32_to_cpu(lmm->lmm_magic) != LOV_MAGIC_COMP_V1)
590 comp_v1 = (struct lov_comp_md_v1 *)lmm;
591 off = le32_to_cpu(comp_v1->lcm_entries[0].lcme_offset);
592 v1 = (struct lov_mds_md *)((char *)comp_v1 + off);
594 /* Fast check for DoM entry with no mirroring, should be the first */
595 if (le16_to_cpu(comp_v1->lcm_mirror_count) == 0 &&
596 !(lov_pattern(le32_to_cpu(v1->lmm_pattern)) & LOV_PATTERN_MDT))
599 /* check all entries otherwise */
600 for (i = 0; i < le16_to_cpu(comp_v1->lcm_entry_count); i++) {
601 struct lov_comp_md_entry_v1 *lcme;
603 lcme = &comp_v1->lcm_entries[i];
604 if (!(le32_to_cpu(lcme->lcme_flags) & LCME_FL_INIT))
607 off = le32_to_cpu(lcme->lcme_offset);
608 v1 = (struct lov_mds_md *)((char *)comp_v1 + off);
610 if (lov_pattern(le32_to_cpu(v1->lmm_pattern)) &
612 dom_stripesize = le32_to_cpu(v1->lmm_stripe_size);
614 has_ost_stripes = true;
616 if (dom_stripesize && has_ost_stripes)
617 RETURN(dom_stripesize);
619 /* DoM-only case exits here */
620 if (is_dom_only && dom_stripesize)
622 RETURN(dom_stripesize);
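/*
 * Return value sketch: for a [DoM component][OST components] layout this
 * returns the DoM stripe size with *is_dom_only left unset; for a
 * DoM-only layout it returns the stripe size after the (elided) line
 * setting *is_dom_only; for a plain OST layout it returns 0.
 */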
625 /* Pack size attributes into the reply. */
626 int mdt_pack_size2body(struct mdt_thread_info *info,
627 const struct lu_fid *fid, struct lustre_handle *lh)
630 struct md_attr *ma = &info->mti_attr;
632 bool dom_lock = false;
636 LASSERT(ma->ma_attr.la_valid & LA_MODE);
638 if (!S_ISREG(ma->ma_attr.la_mode) ||
639 !(ma->ma_valid & MA_LOV && ma->ma_lmm != NULL))
642 dom_stripe = mdt_lmm_dom_stripesize(ma->ma_lmm);
643 /* no DoM stripe, no size in reply */
647 if (lustre_handle_is_used(lh)) {
648 struct ldlm_lock *lock;
650 lock = ldlm_handle2lock(lh);
652 dom_lock = ldlm_has_dom(lock);
657 /* no DoM lock, no size in reply */
661 /* Either a DoM lock exists or the LMM has only a DoM stripe;
662 * in both cases return the size in the body.
664 b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
666 mdt_dom_object_size(info->mti_env, info->mti_mdt, fid, b, dom_lock);
670 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
672 * Pack ACL data into the reply. UIDs/GIDs are mapped and filtered by nodemap.
674 * \param info thread info object
675 * \param repbody reply to pack ACLs into
676 * \param o mdt object of file to examine
677 * \param nodemap nodemap of client to reply to
679 * \retval -errno error getting or parsing ACL from disk
681 int mdt_pack_acl2body(struct mdt_thread_info *info, struct mdt_body *repbody,
682 struct mdt_object *o, struct lu_nodemap *nodemap)
684 const struct lu_env *env = info->mti_env;
685 struct md_object *next = mdt_object_child(o);
686 struct lu_buf *buf = &info->mti_buf;
687 struct mdt_device *mdt = info->mti_mdt;
688 struct req_capsule *pill = info->mti_pill;
693 buf->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
694 buf->lb_len = req_capsule_get_size(pill, &RMF_ACL, RCL_SERVER);
695 if (buf->lb_len == 0)
698 LASSERT(!info->mti_big_acl_used);
700 rc = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_ACCESS);
702 if (rc == -ENODATA) {
703 repbody->mbo_aclsize = 0;
704 repbody->mbo_valid |= OBD_MD_FLACL;
706 } else if (rc == -EOPNOTSUPP) {
708 } else if (rc == -ERANGE) {
709 if (exp_connect_large_acl(info->mti_exp) &&
710 !info->mti_big_acl_used) {
711 if (info->mti_big_acl == NULL) {
712 info->mti_big_aclsize =
714 mdt->mdt_max_ea_size,
716 OBD_ALLOC_LARGE(info->mti_big_acl,
717 info->mti_big_aclsize);
718 if (info->mti_big_acl == NULL) {
719 info->mti_big_aclsize = 0;
720 CERROR("%s: unable to grow "
723 PFID(mdt_object_fid(o)));
728 CDEBUG(D_INODE, "%s: grow the "DFID
729 " ACL buffer to size %d\n",
731 PFID(mdt_object_fid(o)),
732 info->mti_big_aclsize);
734 buf->lb_buf = info->mti_big_acl;
735 buf->lb_len = info->mti_big_aclsize;
736 info->mti_big_acl_used = 1;
739 /* FS has an ACL bigger than our limits */
740 CDEBUG(D_INODE, "%s: "DFID" ACL can't fit into %d\n",
741 mdt_obd_name(mdt), PFID(mdt_object_fid(o)),
742 info->mti_big_aclsize);
745 CERROR("%s: unable to read "DFID" ACL: rc = %d\n",
746 mdt_obd_name(mdt), PFID(mdt_object_fid(o)), rc);
749 rc = nodemap_map_acl(nodemap, buf->lb_buf,
750 rc, NODEMAP_FS_TO_CLIENT);
751 /* if all ACLs mapped out, rc is still >= 0 */
753 CERROR("%s: nodemap_map_acl unable to parse "DFID
754 " ACL: rc = %d\n", mdt_obd_name(mdt),
755 PFID(mdt_object_fid(o)), rc);
756 repbody->mbo_aclsize = 0;
757 repbody->mbo_valid &= ~OBD_MD_FLACL;
759 repbody->mbo_aclsize = rc;
760 repbody->mbo_valid |= OBD_MD_FLACL;
769 /* XXX Look into layout in MDT layer. */
770 static inline bool mdt_hsm_is_released(struct lov_mds_md *lmm)
772 struct lov_comp_md_v1 *comp_v1;
773 struct lov_mds_md *v1;
776 if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
777 comp_v1 = (struct lov_comp_md_v1 *)lmm;
779 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
780 v1 = (struct lov_mds_md *)((char *)comp_v1 +
781 comp_v1->lcm_entries[i].lcme_offset);
782 /* We don't support partial release for now */
783 if (!(v1->lmm_pattern & LOV_PATTERN_F_RELEASED))
788 return (lmm->lmm_pattern & LOV_PATTERN_F_RELEASED) ?
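/*
 * Example: a composite (PFL) file counts as released only if every
 * initialized component has LOV_PATTERN_F_RELEASED set, since partial
 * release is not supported; a plain V1/V3 layout is tested directly on
 * lmm_pattern.
 */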
793 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
794 const struct lu_attr *attr, const struct lu_fid *fid)
796 struct mdt_device *mdt = info->mti_mdt;
797 struct obd_export *exp = info->mti_exp;
798 struct md_attr *ma = &info->mti_attr;
799 struct lu_nodemap *nodemap = NULL;
801 LASSERT(ma->ma_valid & MA_INODE);
803 if (attr->la_valid & LA_ATIME) {
804 b->mbo_atime = attr->la_atime;
805 b->mbo_valid |= OBD_MD_FLATIME;
807 if (attr->la_valid & LA_MTIME) {
808 b->mbo_mtime = attr->la_mtime;
809 b->mbo_valid |= OBD_MD_FLMTIME;
811 if (attr->la_valid & LA_CTIME) {
812 b->mbo_ctime = attr->la_ctime;
813 b->mbo_valid |= OBD_MD_FLCTIME;
815 if (attr->la_valid & LA_BTIME) {
816 b->mbo_btime = attr->la_btime;
817 b->mbo_valid |= OBD_MD_FLBTIME;
819 if (attr->la_valid & LA_FLAGS) {
820 b->mbo_flags = attr->la_flags;
821 b->mbo_valid |= OBD_MD_FLFLAGS;
823 if (attr->la_valid & LA_NLINK) {
824 b->mbo_nlink = attr->la_nlink;
825 b->mbo_valid |= OBD_MD_FLNLINK;
827 if (attr->la_valid & (LA_UID|LA_GID|LA_PROJID)) {
828 nodemap = nodemap_get_from_exp(exp);
832 if (attr->la_valid & LA_UID) {
833 b->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
834 NODEMAP_FS_TO_CLIENT,
836 b->mbo_valid |= OBD_MD_FLUID;
838 if (attr->la_valid & LA_GID) {
839 b->mbo_gid = nodemap_map_id(nodemap, NODEMAP_GID,
840 NODEMAP_FS_TO_CLIENT,
842 b->mbo_valid |= OBD_MD_FLGID;
845 if (attr->la_valid & LA_PROJID) {
846 b->mbo_projid = nodemap_map_id(nodemap, NODEMAP_PROJID,
847 NODEMAP_FS_TO_CLIENT,
849 b->mbo_valid |= OBD_MD_FLPROJID;
852 b->mbo_mode = attr->la_mode;
853 if (attr->la_valid & LA_MODE)
854 b->mbo_valid |= OBD_MD_FLMODE;
855 if (attr->la_valid & LA_TYPE)
856 b->mbo_valid |= OBD_MD_FLTYPE;
860 b->mbo_valid |= OBD_MD_FLID;
861 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, valid=%#llx\n",
862 PFID(fid), b->mbo_nlink, b->mbo_mode, b->mbo_valid);
865 if (!(attr->la_valid & LA_TYPE))
868 b->mbo_rdev = attr->la_rdev;
869 b->mbo_size = attr->la_size;
870 b->mbo_blocks = attr->la_blocks;
872 if (!S_ISREG(attr->la_mode)) {
873 b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
874 } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
875 /* this means no objects are allocated on OSTs. */
876 LASSERT(!(ma->ma_valid & MA_LOV));
877 /* just ignore blocks occupied by extended attributes on MDS */
879 /* if no object is allocated on OSTs, the size on the MDS is valid.
882 b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
883 } else if ((ma->ma_valid & MA_LOV) && ma->ma_lmm != NULL) {
884 if (mdt_hsm_is_released(ma->ma_lmm)) {
885 /* A released file stores its size on MDS. */
886 /* But return 1 block for a released file, otherwise tools
887 * like tar will consider it fully sparse. (LU-3864)
889 if (unlikely(b->mbo_size == 0))
893 b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
894 } else if (info->mti_som_strict && mdt->mdt_enable_strict_som) {
895 /* use SOM for size */
896 b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
897 } else if (ma->ma_valid & MA_SOM) { /* lsom is valid */
898 b->mbo_valid |= OBD_MD_FLLAZYSIZE | OBD_MD_FLLAZYBLOCKS;
899 b->mbo_size = ma->ma_som.ms_size;
900 b->mbo_blocks = ma->ma_som.ms_blocks;
904 if (fid != NULL && (b->mbo_valid & OBD_MD_FLSIZE ||
905 b->mbo_valid & OBD_MD_FLLAZYSIZE))
906 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
907 PFID(fid), (unsigned long long)b->mbo_size);
910 if (!IS_ERR_OR_NULL(nodemap))
911 nodemap_putref(nodemap);
914 static inline int mdt_body_has_lov(const struct lu_attr *la,
915 const struct mdt_body *body)
917 return (S_ISREG(la->la_mode) && (body->mbo_valid & OBD_MD_FLEASIZE)) ||
918 (S_ISDIR(la->la_mode) && (body->mbo_valid & OBD_MD_FLDIREA));
921 void mdt_client_compatibility(struct mdt_thread_info *info)
923 struct mdt_body *body;
924 struct ptlrpc_request *req = mdt_info_req(info);
925 struct obd_export *exp = req->rq_export;
926 struct md_attr *ma = &info->mti_attr;
927 struct lu_attr *la = &ma->ma_attr;
931 if (exp_connect_layout(exp))
932 /* the client can deal with 16-bit lmm_stripe_count */
935 body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
937 if (!mdt_body_has_lov(la, body))
940 /* now we have a reply with a LOV for a client not compatible with the
941 * layout lock, so we have to clear the layout generation number
943 if (S_ISREG(la->la_mode))
944 ma->ma_lmm->lmm_layout_gen = 0;
948 static int mdt_attr_get_eabuf_size(struct mdt_thread_info *info,
949 struct mdt_object *o)
951 const struct lu_env *env = info->mti_env;
954 rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL,
963 /* Is it a directory? Let's check for the LMV as well */
964 if (S_ISDIR(lu_object_attr(&mdt_object_child(o)->mo_lu))) {
965 rc2 = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL,
969 rc2 = mo_xattr_get(env, mdt_object_child(o),
971 XATTR_NAME_DEFAULT_LMV);
973 if ((rc2 < 0 && rc2 != -ENODATA) || (rc2 > rc))
981 int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
984 const struct lu_env *env = info->mti_env;
991 rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
995 if (strcmp(name, XATTR_NAME_LMV) == 0) {
996 LASSERT(info->mti_big_lmv_used == 0);
997 big_size = info->mti_big_lmvsize;
998 big_lmm = info->mti_big_lmv;
999 using_big_lmv = true;
1001 LASSERT(info->mti_big_lov_used == 0);
1002 big_size = info->mti_big_lovsize;
1003 big_lmm = info->mti_big_lov;
1004 using_big_lmv = false;
1007 /* big_lmm may need to be grown */
1008 if (big_size < rc) {
1009 int size = size_roundup_power2(rc);
1012 /* free old buffer */
1014 OBD_FREE_LARGE(big_lmm, big_size);
1019 OBD_ALLOC_LARGE(big_lmm, size);
1020 if (big_lmm == NULL)
1021 GOTO(out, rc = -ENOMEM);
1024 LASSERT(big_size >= rc);
1026 info->mti_buf.lb_buf = big_lmm;
1027 info->mti_buf.lb_len = big_size;
1028 rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);
1030 if (using_big_lmv) {
1031 info->mti_big_lmvsize = big_size;
1032 info->mti_big_lmv = big_lmm;
1034 info->mti_big_lovsize = big_size;
1035 info->mti_big_lov = big_lmm;
1040 int __mdt_stripe_get(struct mdt_thread_info *info, struct mdt_object *o,
1041 struct md_attr *ma, const char *name)
1043 struct md_object *next = mdt_object_child(o);
1044 struct lu_buf *buf = &info->mti_buf;
1046 bool is_lov = false;
1048 if (strcmp(name, XATTR_NAME_LOV) == 0) {
1049 buf->lb_buf = ma->ma_lmm;
1050 buf->lb_len = ma->ma_lmm_size;
1052 LASSERT(!(ma->ma_valid & MA_LOV));
1053 } else if (strcmp(name, XATTR_NAME_LMV) == 0) {
1054 buf->lb_buf = ma->ma_lmv;
1055 buf->lb_len = ma->ma_lmv_size;
1056 LASSERT(!(ma->ma_valid & MA_LMV));
1057 } else if (strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0) {
1058 buf->lb_buf = ma->ma_default_lmv;
1059 buf->lb_len = ma->ma_default_lmv_size;
1060 LASSERT(!(ma->ma_valid & MA_LMV_DEF));
1065 LASSERT(buf->lb_buf);
1067 if (!mdt_object_exists(o))
1070 if (mdt_object_remote(o) && S_ISDIR(lu_object_attr(&o->mot_obj)))
1071 /* force reload layout for remote dir in case layout changed */
1072 mo_invalidate(info->mti_env, mdt_object_child(o));
1074 rc = mo_xattr_get(info->mti_env, next, buf, name);
1078 if (strcmp(name, XATTR_NAME_LOV) == 0) {
1079 /* Do NOT return a LOV EA with holes to an old client. */
1080 if (unlikely(le32_to_cpu(ma->ma_lmm->lmm_pattern) &
1081 LOV_PATTERN_F_HOLE) &&
1082 !(exp_connect_flags(info->mti_exp) &
1083 OBD_CONNECT_LFSCK)) {
1086 if (info->mti_big_lov_used) {
1087 LASSERT(info->mti_big_lovsize >= rc);
1088 ma->ma_lmm = info->mti_big_lov;
1090 ma->ma_lmm_size = rc;
1091 ma->ma_valid |= MA_LOV;
1092 } else if (strcmp(name, XATTR_NAME_LMV) == 0) {
1093 if (info->mti_big_lmv_used) {
1094 LASSERT(info->mti_big_lmvsize >= rc);
1095 ma->ma_lmv = info->mti_big_lmv;
1097 ma->ma_lmv_size = rc;
1098 ma->ma_valid |= MA_LMV;
1099 } else if (strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0) {
1100 ma->ma_default_lmv_size = rc;
1101 ma->ma_valid |= MA_LMV_DEF;
1104 /* Update mdt_max_mdsize so all clients will be aware of it */
1105 if (info->mti_mdt->mdt_max_mdsize < rc)
1106 info->mti_mdt->mdt_max_mdsize = rc;
1109 } else if (rc == -ENODATA) {
1112 } else if (rc == -ERANGE) {
1113 /* Default LMV has fixed size, so it must be able to fit
1114 * in the original buffer
1116 if (strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0)
1118 rc = mdt_big_xattr_get(info, o, name);
1121 info->mti_big_lov_used = 1;
1123 info->mti_big_lmv_used = 1;
1131 int mdt_stripe_get(struct mdt_thread_info *info, struct mdt_object *o,
1132 struct md_attr *ma, const char *name)
1138 if (strcmp(name, XATTR_NAME_LOV) == 0) {
1139 big_size = info->mti_big_lovsize;
1140 big_lmm = info->mti_big_lov;
1142 } else if (strcmp(name, XATTR_NAME_LMV) == 0) {
1143 big_size = info->mti_big_lmvsize;
1144 big_lmm = info->mti_big_lmv;
1151 OBD_ALLOC_LARGE(big_lmm, PAGE_SIZE);
1154 big_size = PAGE_SIZE;
1158 info->mti_big_lmvsize = ma->ma_lmv_size = big_size;
1159 info->mti_big_lmv = ma->ma_lmv = big_lmm;
1160 ma->ma_valid &= ~MA_LMV;
1162 info->mti_big_lovsize = ma->ma_lmm_size = big_size;
1163 info->mti_big_lov = ma->ma_lmm = big_lmm;
1164 ma->ma_valid &= ~MA_LOV;
1167 rc = __mdt_stripe_get(info, o, ma, name);
1172 int mdt_attr_get_pfid(struct mdt_thread_info *info, struct mdt_object *o,
1173 struct lu_fid *pfid)
1175 struct lu_buf *buf = &info->mti_buf;
1176 struct link_ea_header *leh;
1177 struct link_ea_entry *lee;
1182 buf->lb_buf = info->mti_xattr_buf;
1183 buf->lb_len = sizeof(info->mti_xattr_buf);
1184 rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
1185 buf, XATTR_NAME_LINK);
1186 /* ignore errors, MA_PFID won't be set and it is
1187 * up to the caller to treat this as an error
1189 if (rc == -ERANGE || buf->lb_len == 0) {
1190 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
1191 buf->lb_buf = info->mti_big_lov;
1192 buf->lb_len = info->mti_big_lovsize;
1197 if (rc < sizeof(*leh)) {
1198 CERROR("short LinkEA on "DFID": rc = %d\n",
1199 PFID(mdt_object_fid(o)), rc);
1203 leh = (struct link_ea_header *) buf->lb_buf;
1204 lee = (struct link_ea_entry *)(leh + 1);
1205 if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
1206 leh->leh_magic = LINK_EA_MAGIC;
1207 leh->leh_reccount = __swab32(leh->leh_reccount);
1208 leh->leh_len = __swab64(leh->leh_len);
1210 if (leh->leh_magic != LINK_EA_MAGIC)
1212 if (leh->leh_reccount == 0)
1215 memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
1216 fid_be_to_cpu(pfid, pfid);
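/*
 * LinkEA layout assumed above: a struct link_ea_header immediately
 * followed by leh_reccount variable-length link_ea_entry records, each
 * carrying a big-endian parent FID plus the link name; only the first
 * entry's parent FID is consumed here, hence the fid_be_to_cpu().
 */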
1221 int mdt_attr_get_pfid_name(struct mdt_thread_info *info, struct mdt_object *o,
1222 struct lu_fid *pfid, struct lu_name *lname)
1224 struct lu_buf *buf = &info->mti_buf;
1225 struct link_ea_header *leh;
1226 struct link_ea_entry *lee;
1230 buf->lb_buf = info->mti_xattr_buf;
1231 buf->lb_len = sizeof(info->mti_xattr_buf);
1232 rc = mo_xattr_get(info->mti_env, mdt_object_child(o), buf,
1234 if (rc == -ERANGE) {
1235 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
1236 buf->lb_buf = info->mti_big_lov;
1237 buf->lb_len = info->mti_big_lovsize;
1242 if (rc < sizeof(*leh)) {
1243 CERROR("short LinkEA on "DFID": rc = %d\n",
1244 PFID(mdt_object_fid(o)), rc);
1248 leh = (struct link_ea_header *)buf->lb_buf;
1249 lee = (struct link_ea_entry *)(leh + 1);
1250 if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
1251 leh->leh_magic = LINK_EA_MAGIC;
1252 leh->leh_reccount = __swab32(leh->leh_reccount);
1253 leh->leh_len = __swab64(leh->leh_len);
1255 if (leh->leh_magic != LINK_EA_MAGIC)
1258 if (leh->leh_reccount == 0)
1261 linkea_entry_unpack(lee, &reclen, lname, pfid);
1266 int mdt_attr_get_complex(struct mdt_thread_info *info,
1267 struct mdt_object *o, struct md_attr *ma)
1269 const struct lu_env *env = info->mti_env;
1270 struct md_object *next = mdt_object_child(o);
1271 struct lu_buf *buf = &info->mti_buf;
1272 int need = ma->ma_need;
1280 if (mdt_object_exists(o) == 0)
1281 GOTO(out, rc = -ENOENT);
1282 mode = lu_object_attr(&next->mo_lu);
1284 if (need & MA_INODE) {
1285 ma->ma_need = MA_INODE;
1286 if (need & MA_DIRENT_CNT)
1287 ma->ma_attr.la_valid |= LA_DIRENT_CNT;
1289 ma->ma_attr.la_valid &= ~LA_DIRENT_CNT;
1290 rc = mo_attr_get(env, next, ma);
1295 (void) mdt_get_som(info, o, ma);
1296 ma->ma_valid |= MA_INODE;
1299 if (need & MA_PFID) {
1300 rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
1302 ma->ma_valid |= MA_PFID;
1303 /* ignore this error, parent fid is not mandatory */
1307 if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
1308 rc = __mdt_stripe_get(info, o, ma, XATTR_NAME_LOV);
1313 if (need & MA_LMV && S_ISDIR(mode)) {
1314 rc = __mdt_stripe_get(info, o, ma, XATTR_NAME_LMV);
1319 if (need & MA_LMV_DEF && S_ISDIR(mode)) {
1320 rc = __mdt_stripe_get(info, o, ma, XATTR_NAME_DEFAULT_LMV);
1325 /* While handling MA_INODE, we may already have fetched the SOM attr. */
1326 if (need & MA_SOM && S_ISREG(mode) && !(ma->ma_valid & MA_SOM)) {
1327 rc = mdt_get_som(info, o, ma);
1332 if (need & MA_HSM && S_ISREG(mode)) {
1333 buf->lb_buf = info->mti_xattr_buf;
1334 buf->lb_len = sizeof(info->mti_xattr_buf);
1335 BUILD_BUG_ON(sizeof(struct hsm_attrs) >
1336 sizeof(info->mti_xattr_buf));
1337 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
1338 rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
1340 ma->ma_valid |= MA_HSM;
1341 else if (rc2 < 0 && rc2 != -ENODATA)
1342 GOTO(out, rc = rc2);
1345 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1346 if (need & MA_ACL_DEF && S_ISDIR(mode)) {
1347 buf->lb_buf = ma->ma_acl;
1348 buf->lb_len = ma->ma_acl_size;
1349 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
1351 ma->ma_acl_size = rc2;
1352 ma->ma_valid |= MA_ACL_DEF;
1353 } else if (rc2 == -ENODATA) {
1355 ma->ma_acl_size = 0;
1357 GOTO(out, rc = rc2);
1362 CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = %#llx ma_lmm=%p\n",
1363 rc, ma->ma_valid, ma->ma_lmm);
1367 static void mdt_preset_encctx_size(struct mdt_thread_info *info)
1369 struct req_capsule *pill = info->mti_pill;
1372 if (req_capsule_has_field(pill, &RMF_FILE_ENCCTX,
1374 /* pre-set size in server part with max size */
1375 req_capsule_set_size(pill, &RMF_FILE_ENCCTX,
1377 info->mti_mdt->mdt_max_mdsize);
1381 static int mdt_getattr_internal(struct mdt_thread_info *info,
1382 struct mdt_object *o, int ma_need)
1384 struct mdt_device *mdt = info->mti_mdt;
1385 struct md_object *next = mdt_object_child(o);
1386 const struct mdt_body *reqbody = info->mti_body;
1387 struct ptlrpc_request *req = mdt_info_req(info);
1388 struct md_attr *ma = &info->mti_attr;
1389 struct lu_attr *la = &ma->ma_attr;
1390 struct req_capsule *pill = info->mti_pill;
1391 const struct lu_env *env = info->mti_env;
1392 struct mdt_body *repbody;
1393 struct lu_buf *buffer = &info->mti_buf;
1394 struct obd_export *exp = info->mti_exp;
1395 ktime_t kstart = ktime_get();
1400 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
1401 RETURN(err_serious(-ENOMEM));
1403 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1407 if (mdt_object_remote(o)) {
1408 /* obj is located on a remote node; return -ENOTSUPP to old clients */
1409 if (!mdt_is_dne_client(req->rq_export))
1410 GOTO(out, rc = -ENOTSUPP);
1412 repbody->mbo_fid1 = *mdt_object_fid(o);
1413 repbody->mbo_valid = OBD_MD_FLID | OBD_MD_MDS;
1417 if (reqbody->mbo_eadatasize > 0) {
1418 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
1419 if (buffer->lb_buf == NULL)
1420 GOTO(out, rc = -EPROTO);
1421 buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD,
1424 buffer->lb_buf = NULL;
1426 ma_need &= ~(MA_LOV | MA_LMV);
1427 CDEBUG(D_INFO, "%s: RPC from %s: does not need LOVEA.\n",
1428 mdt_obd_name(info->mti_mdt),
1429 req->rq_export->exp_client_uuid.uuid);
1432 /* since 2.12.58 intent_getattr packs the default LMV in the reply */
1433 if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
1434 ((reqbody->mbo_valid & (OBD_MD_MEA | OBD_MD_DEFAULT_MEA)) ==
1435 (OBD_MD_MEA | OBD_MD_DEFAULT_MEA)) &&
1436 req_capsule_has_field(&req->rq_pill, &RMF_DEFAULT_MDT_MD,
1438 ma->ma_lmv = buffer->lb_buf;
1439 ma->ma_lmv_size = buffer->lb_len;
1440 ma->ma_default_lmv = req_capsule_server_get(pill,
1441 &RMF_DEFAULT_MDT_MD);
1442 ma->ma_default_lmv_size = req_capsule_get_size(pill,
1443 &RMF_DEFAULT_MDT_MD,
1445 ma->ma_need = MA_INODE;
1446 if (ma->ma_lmv_size > 0)
1447 ma->ma_need |= MA_LMV;
1448 if (ma->ma_default_lmv_size > 0)
1449 ma->ma_need |= MA_LMV_DEF;
1450 } else if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
1451 (reqbody->mbo_valid & (OBD_MD_MEA | OBD_MD_DEFAULT_MEA))) {
1452 /* If it is a dir and the client requests the MEA, then we return the MEA */
1453 /* Assumption: the MDT_MD size is enough for the LMV size. */
1454 ma->ma_lmv = buffer->lb_buf;
1455 ma->ma_lmv_size = buffer->lb_len;
1456 ma->ma_need = MA_INODE;
1457 if (ma->ma_lmv_size > 0) {
1458 if (reqbody->mbo_valid & OBD_MD_MEA) {
1459 ma->ma_need |= MA_LMV;
1460 } else if (reqbody->mbo_valid & OBD_MD_DEFAULT_MEA) {
1461 ma->ma_need |= MA_LMV_DEF;
1462 ma->ma_default_lmv = buffer->lb_buf;
1464 ma->ma_default_lmv_size = buffer->lb_len;
1465 ma->ma_lmv_size = 0;
1469 ma->ma_lmm = buffer->lb_buf;
1470 ma->ma_lmm_size = buffer->lb_len;
1471 ma->ma_need = MA_INODE | MA_HSM;
1472 if (ma->ma_lmm_size > 0) {
1473 ma->ma_need |= MA_LOV;
1474 /* Older clients may crash if they getattr overstriped files */
1477 if (!exp_connect_overstriping(exp) &&
1478 mdt_lmm_is_overstriping(ma->ma_lmm))
1479 RETURN(-EOPNOTSUPP);
1483 if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
1484 reqbody->mbo_valid & OBD_MD_FLDIREA &&
1485 lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
1486 /* get default stripe info for this dir. */
1487 ma->ma_need |= MA_LOV_DEF;
1489 ma->ma_need |= ma_need;
1491 rc = mdt_attr_get_complex(info, o, ma);
1493 CDEBUG_LIMIT(rc == -ENOENT ? D_OTHER : D_ERROR,
1494 "%s: getattr error for "DFID": rc = %d\n",
1495 mdt_obd_name(info->mti_mdt),
1496 PFID(mdt_object_fid(o)), rc);
1500 /* return immutable attr on fscrypt metadata files
1501 * if fscrypt admin is not permitted
1503 if (o->mot_obj.lo_header->loh_attr & LOHA_FSCRYPT_MD &&
1504 !mdt_ucred(info)->uc_rbac_fscrypt_admin)
1505 la->la_flags |= LUSTRE_IMMUTABLE_FL;
1507 /* if file is released, check if a restore is running */
1508 if (ma->ma_valid & MA_HSM) {
1509 repbody->mbo_valid |= OBD_MD_TSTATE;
1510 if ((ma->ma_hsm.mh_flags & HS_RELEASED) &&
1511 mdt_hsm_restore_is_running(info, mdt_object_fid(o)))
1512 repbody->mbo_t_state = MS_RESTORE;
1515 if (unlikely(!(ma->ma_valid & MA_INODE)))
1518 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
1520 if (mdt_body_has_lov(la, reqbody)) {
1521 u32 stripe_count = 1;
1522 bool fixed_layout = false;
1524 if (ma->ma_valid & MA_LOV) {
1525 LASSERT(ma->ma_lmm_size);
1526 repbody->mbo_eadatasize = ma->ma_lmm_size;
1527 if (S_ISDIR(la->la_mode))
1528 repbody->mbo_valid |= OBD_MD_FLDIREA;
1530 repbody->mbo_valid |= OBD_MD_FLEASIZE;
1531 mdt_dump_lmm(D_INFO, ma->ma_lmm, repbody->mbo_valid);
1533 if (ma->ma_valid & MA_LMV) {
1534 struct lmv_mds_md_v1 *lmv = &ma->ma_lmv->lmv_md_v1;
1535 u32 magic = le32_to_cpu(lmv->lmv_magic);
1537 /* Return -ENOTSUPP for old client */
1538 if (!mdt_is_striped_client(req->rq_export))
1541 LASSERT(S_ISDIR(la->la_mode));
1542 mdt_dump_lmv(D_INFO, ma->ma_lmv);
1543 repbody->mbo_eadatasize = ma->ma_lmv_size;
1544 repbody->mbo_valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
1546 stripe_count = le32_to_cpu(lmv->lmv_stripe_count);
1547 fixed_layout = lmv_is_fixed(lmv);
1548 if (magic == LMV_MAGIC_STRIPE && lmv_is_restriping(lmv))
1549 mdt_restripe_migrate_add(info, o);
1550 else if (magic == LMV_MAGIC_V1 &&
1551 lmv_is_restriping(lmv))
1552 mdt_restripe_update_add(info, o);
1554 if (ma->ma_valid & MA_LMV_DEF) {
1555 /* Return -ENOTSUPP for old client */
1556 if (!mdt_is_striped_client(req->rq_export))
1558 LASSERT(S_ISDIR(la->la_mode));
1560 * when ll_dir_getstripe() gets default LMV, it
1561 * checks mbo_eadatasize.
1563 if (!(ma->ma_valid & MA_LMV))
1564 repbody->mbo_eadatasize =
1565 ma->ma_default_lmv_size;
1566 repbody->mbo_valid |= (OBD_MD_FLDIREA |
1567 OBD_MD_DEFAULT_MEA);
1570 "dirent count %llu stripe count %u MDT count %d\n",
1571 ma->ma_attr.la_dirent_count, stripe_count,
1572 atomic_read(&mdt->mdt_mds_mds_conns) + 1);
1573 if (ma->ma_attr.la_dirent_count != LU_DIRENT_COUNT_UNSET &&
1574 ma->ma_attr.la_dirent_count >
1575 mdt->mdt_restriper.mdr_dir_split_count &&
1576 !fid_is_root(mdt_object_fid(o)) &&
1577 mdt->mdt_enable_dir_auto_split &&
1578 !o->mot_restriping &&
1579 stripe_count < atomic_read(&mdt->mdt_mds_mds_conns) + 1 &&
1581 mdt_auto_split_add(info, o);
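/*
 * Auto-split trigger sketch from the condition above: a directory is
 * queued for splitting once its dirent count exceeds
 * mdr_dir_split_count while its stripe count is still below the MDT
 * count (mdt_mds_mds_conns + 1), unless it is the root, is already
 * restriping, or auto-split is disabled.
 */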
1582 } else if (S_ISLNK(la->la_mode) &&
1583 reqbody->mbo_valid & OBD_MD_LINKNAME) {
1584 buffer->lb_buf = ma->ma_lmm;
1585 /* eadatasize from client includes NULL-terminator, so
1586 * there is no need to read it
1589 if (reqbody->mbo_eadatasize > 0)
1590 buffer->lb_len = reqbody->mbo_eadatasize - 1;
1591 rc = mo_readlink(env, next, buffer);
1592 if (unlikely(rc <= 0)) {
1593 CERROR("%s: readlink failed for "DFID": rc = %d\n",
1594 mdt_obd_name(info->mti_mdt),
1595 PFID(mdt_object_fid(o)), rc);
1598 int print_limit = min_t(int, PAGE_SIZE - 128, rc);
1600 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
1602 repbody->mbo_valid |= OBD_MD_LINKNAME;
1603 /* we need to report back size with NULL-terminator
1604 * because client expects that
1606 repbody->mbo_eadatasize = rc + 1;
1607 if (repbody->mbo_eadatasize != reqbody->mbo_eadatasize)
1608 CDEBUG(D_INODE, "%s: Read shorter symlink %d on "
1609 DFID ", expected %d\n",
1610 mdt_obd_name(info->mti_mdt),
1611 rc, PFID(mdt_object_fid(o)),
1612 reqbody->mbo_eadatasize - 1);
1613 /* NULL terminate */
1614 ((char *)ma->ma_lmm)[rc] = 0;
1616 /* If the total CDEBUG() size is larger than a page, it
1617 * will print a warning to the console, avoid this by
1618 * printing just the last part of the symlink.
1620 CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
1621 print_limit < rc ? "..." : "", print_limit,
1622 (char *)ma->ma_lmm + rc - print_limit, rc);
1627 if (reqbody->mbo_valid & OBD_MD_FLMODEASIZE) {
1628 repbody->mbo_max_mdsize = info->mti_mdt->mdt_max_mdsize;
1629 repbody->mbo_valid |= OBD_MD_FLMODEASIZE;
1630 CDEBUG(D_INODE, "changing the max MD size to %u\n",
1631 repbody->mbo_max_mdsize);
1634 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1635 if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
1636 (reqbody->mbo_valid & OBD_MD_FLACL)) {
1637 struct lu_nodemap *nodemap = nodemap_get_from_exp(exp);
1639 if (IS_ERR(nodemap))
1640 RETURN(PTR_ERR(nodemap));
1642 rc = mdt_pack_acl2body(info, repbody, o, nodemap);
1643 nodemap_putref(nodemap);
1649 mdt_counter_incr(req, LPROC_MDT_GETATTR,
1650 ktime_us_delta(ktime_get(), kstart));
1655 static int mdt_getattr(struct tgt_session_info *tsi)
1657 struct mdt_thread_info *info = tsi2mdt_info(tsi);
1658 struct mdt_object *obj = info->mti_object;
1659 struct req_capsule *pill = info->mti_pill;
1660 struct mdt_body *reqbody;
1661 struct mdt_body *repbody;
1666 if (unlikely(info->mti_object == NULL))
1669 reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
1671 LASSERT(lu_object_exists(&obj->mot_obj));
1673 /* Special case for Data-on-MDT files to get data version */
1674 if (unlikely(reqbody->mbo_valid & OBD_MD_FLDATAVERSION)) {
1675 rc = mdt_data_version_get(tsi);
1679 /* Unlike the intent case, where we need to pre-fill buffers early on
1680 * in the intent policy for ldlm reasons, here we can make a much better
1681 * guess at the EA size by just reading it from disk.
1682 * Exceptions are readdir and (missing) directory striping
1684 if (reqbody->mbo_valid & OBD_MD_LINKNAME) { /* Readlink */
1685 /* No easy way to know how long the symlink is, but it cannot
1686 * be more than PATH_MAX, so we allocate +1
1689 /* A special case for fs ROOT: getattr there might fetch
1690 * default EA for entire fs, not just for this dir!
1692 } else if (lu_fid_eq(mdt_object_fid(obj),
1693 &info->mti_mdt->mdt_md_root_fid) &&
1694 (reqbody->mbo_valid & OBD_MD_FLDIREA) &&
1695 (lustre_msg_get_opc(mdt_info_req(info)->rq_reqmsg) ==
1697 /* Should the default striping be bigger, mdt_fix_reply
1700 rc = DEF_REP_MD_SIZE;
1702 /* Read the actual EA size from disk */
1703 rc = mdt_attr_get_eabuf_size(info, obj);
1707 GOTO(out, rc = err_serious(rc));
1709 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, rc);
1711 /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD
1712 * by default. If the target object has more ACL entries, then
1713 * enlarge the buffer when necessary.
1715 req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER,
1716 LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
1717 mdt_preset_encctx_size(info);
1719 rc = req_capsule_server_pack(pill);
1720 if (unlikely(rc != 0))
1721 GOTO(out, rc = err_serious(rc));
1723 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1724 LASSERT(repbody != NULL);
1725 repbody->mbo_eadatasize = 0;
1726 repbody->mbo_aclsize = 0;
1728 rc = mdt_check_ucred(info);
1730 GOTO(out_shrink, rc);
1732 info->mti_cross_ref = !!(reqbody->mbo_valid & OBD_MD_FLCROSSREF);
1734 rc = mdt_init_ucred(info, reqbody);
1736 GOTO(out_shrink, rc);
1738 rc = mdt_getattr_internal(info, obj, 0);
1740 GOTO(out_ucred, rc);
1742 rc = mdt_pack_encctx_in_reply(info, obj);
1745 mdt_exit_ucred(info);
1747 mdt_client_compatibility(info);
1748 rc2 = mdt_fix_reply(info);
1752 mdt_thread_info_fini(info);
1757 * Handler for a layout intent RPC requiring layout modification
1759 * \param[in] info thread environment
1760 * \param[in] obj object
1761 * \param[out] lhc object ldlm lock handle
1762 * \param[in] layout layout change descriptor
1764 * \retval 0 on success
1765 * \retval < 0 error code
1767 int mdt_layout_change(struct mdt_thread_info *info, struct mdt_object *obj,
1768 struct mdt_lock_handle *lhc,
1769 struct md_layout_change *layout)
1775 if (!mdt_object_exists(obj))
1778 if (!S_ISREG(lu_object_attr(&obj->mot_obj)))
1781 rc = mo_permission(info->mti_env, NULL, mdt_object_child(obj), NULL,
1786 rc = mdt_check_resent_lock(info, obj, lhc);
1792 __u64 lockpart = MDS_INODELOCK_LAYOUT;
1794 /* take layout lock to prepare layout change */
1795 if (layout->mlc_opc == MD_LAYOUT_WRITE)
1796 lockpart |= MDS_INODELOCK_UPDATE;
1798 rc = mdt_object_lock(info, obj, lhc, lockpart, LCK_EX);
1802 CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_LL_PCCRO, cfs_fail_val);
1805 mutex_lock(&obj->mot_som_mutex);
1806 rc = mo_layout_change(info->mti_env, mdt_object_child(obj), layout);
1807 mutex_unlock(&obj->mot_som_mutex);
1810 mdt_object_unlock(info, obj, lhc, 1);
1816 * Exchange MOF_LOV_CREATED flags between two objects after a
1817 * layout swap. No assumption is made on whether o1 or o2 have
1818 * created objects or not.
1820 * \param[in,out] o1 First swap layout object
1821 * \param[in,out] o2 Second swap layout object
1823 static void mdt_swap_lov_flag(struct mdt_object *o1, struct mdt_object *o2)
1825 unsigned int o1_lov_created = o1->mot_lov_created;
1827 mutex_lock(&o1->mot_lov_mutex);
1828 mutex_lock(&o2->mot_lov_mutex);
1830 o1->mot_lov_created = o2->mot_lov_created;
1831 o2->mot_lov_created = o1_lov_created;
1833 mutex_unlock(&o2->mot_lov_mutex);
1834 mutex_unlock(&o1->mot_lov_mutex);
1837 static int mdt_swap_layouts(struct tgt_session_info *tsi)
1839 struct mdt_thread_info *info;
1840 struct ptlrpc_request *req = tgt_ses_req(tsi);
1841 struct obd_export *exp = req->rq_export;
1842 struct mdt_object *o1, *o2, *o;
1843 struct mdt_lock_handle *lh1, *lh2;
1844 struct mdc_swap_layouts *msl;
1849 /* the client does not support the layout lock, so layout swapping
1851 * FIXME: there is a problem for old clients which don't support the
1852 * layout lock yet. If those clients have already opened the file,
1853 * they won't be notified at all, so the old layout may still be
1854 * used to do IO. This can be fixed after file release has landed, by
1855 * doing an exclusive open and taking a full EX ibits lock. - Jinshan
1857 if (!exp_connect_layout(exp))
1858 RETURN(-EOPNOTSUPP);
1860 info = tsi2mdt_info(tsi);
1861 if (unlikely(info->mti_object == NULL))
1864 if (info->mti_dlm_req != NULL)
1865 ldlm_request_cancel(req, info->mti_dlm_req, 0, LATF_SKIP);
1867 o1 = info->mti_object;
1868 o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
1869 &info->mti_body->mbo_fid2);
1871 GOTO(out, rc = PTR_ERR(o));
1873 if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote object */
1874 GOTO(put, rc = -ENOENT);
1876 rc = lu_fid_cmp(&info->mti_body->mbo_fid1, &info->mti_body->mbo_fid2);
1877 if (unlikely(rc == 0)) /* same file, you kidding me? no-op. */
1883 /* permission check: make sure the calling process has permission
1884 * to write both files.
1886 rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
1891 rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
1896 msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
1898 GOTO(put, rc = -EPROTO);
1900 lh1 = &info->mti_lh[MDT_LH_NEW];
1901 lh2 = &info->mti_lh[MDT_LH_OLD];
1902 rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT |
1903 MDS_INODELOCK_XATTR, LCK_EX);
1907 rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT |
1908 MDS_INODELOCK_XATTR, LCK_EX);
1912 rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
1913 mdt_object_child(o2), 0, 0, msl->msl_flags);
1917 mdt_swap_lov_flag(o1, o2);
1920 mdt_object_unlock(info, o2, lh2, rc);
1922 mdt_object_unlock(info, o1, lh1, rc);
1924 mdt_object_put(info->mti_env, o);
1926 mdt_thread_info_fini(info);
1930 static int mdt_raw_lookup(struct mdt_thread_info *info,
1931 struct mdt_object *parent,
1932 const struct lu_name *lname)
1934 struct lu_fid *fid = &info->mti_tmp_fid1;
1935 struct mdt_body *repbody;
1936 bool is_dotdot = false;
1937 bool is_old_parent_stripe = false;
1938 bool is_new_parent_checked = false;
1943 LASSERT(!info->mti_cross_ref);
1944 /* Always allow lookup of ".." */
1945 if (lname->ln_namelen == 2 &&
1946 lname->ln_name[0] == '.' && lname->ln_name[1] == '.') {
1947 info->mti_spec.sp_permitted = 1;
1949 if (mdt_is_dir_stripe(info, parent) == 1)
1950 is_old_parent_stripe = true;
1953 mdt_object_get(info->mti_env, parent);
1955 /* lookup only gets the FID of this object by name */
1957 rc = mdo_lookup(info->mti_env, mdt_object_child(parent), lname, fid,
1959 mdt_object_put(info->mti_env, parent);
1963 /* getattr_name("..") should return master object FID for striped dir */
1964 if (is_dotdot && (is_old_parent_stripe || !is_new_parent_checked)) {
1965 parent = mdt_object_find(info->mti_env, info->mti_mdt, fid);
1967 RETURN(PTR_ERR(parent));
1969 /* old client getattr_name("..") with stripe FID */
1970 if (unlikely(is_old_parent_stripe)) {
1971 is_old_parent_stripe = false;
1975 /* ".." may be a stripe */
1976 if (unlikely(mdt_is_dir_stripe(info, parent) == 1)) {
1977 is_new_parent_checked = true;
1981 mdt_object_put(info->mti_env, parent);
1984 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1985 repbody->mbo_fid1 = *fid;
1986 repbody->mbo_valid = OBD_MD_FLID;
1992 * Find the name matching a hash
1994 * We search \a child's LinkEA for a name whose hash matches \a lname
1995 * (it contains an encoded hash).
1997 * \param info mdt thread info
1998 * \param lname encoded hash to find
1999 * \param parent parent object
2000 * \param child object whose LinkEA is searched
2002 * \retval 1 match found
2003 * \retval 0 no match found
2004 * \retval < 0 negative errno upon error
2006 int find_name_matching_hash(struct mdt_thread_info *info, struct lu_name *lname,
2007 struct mdt_object *parent, struct mdt_object *child)
2009 /* Here, lname is an encoded hash of the on-disk name, and the
2010 * client is doing access without the encryption key.
2011 * So we need to get the LinkEA, check the parent FID is correct, and
2012 * compare the name hash with the one in the request.
2014 struct lu_buf *buf = &info->mti_big_buf;
2015 struct lu_name name;
2017 struct linkea_data ldata = { NULL };
2018 struct link_ea_header *leh;
2019 struct link_ea_entry *lee;
2020 struct lu_buf link = { 0 };
2022 int reclen, count, rc;
2025 if (lname->ln_namelen < LL_CRYPTO_BLOCK_SIZE)
2028 buf = lu_buf_check_and_alloc(buf, PATH_MAX);
2033 rc = mdt_links_read(info, child, &ldata);
2037 hash = kmalloc(lname->ln_namelen, GFP_NOFS);
2040 rc = critical_decode(lname->ln_name, lname->ln_namelen, hash);
2043 lee = (struct link_ea_entry *)(leh + 1);
2044 for (count = 0; count < leh->leh_reccount; count++) {
2045 linkea_entry_unpack(lee, &reclen, &name, &pfid);
2046 if (!parent || lu_fid_eq(&pfid, mdt_object_fid(parent))) {
2047 lu_buf_check_and_alloc(&link, name.ln_namelen);
2049 GOTO(out_match, rc = -ENOMEM);
2050 rc = critical_decode(name.ln_name, name.ln_namelen,
2053 if (memcmp(LLCRYPT_EXTRACT_DIGEST(link.lb_buf, rc),
2054 hash, LL_CRYPTO_BLOCK_SIZE) == 0) {
2059 lee = (struct link_ea_entry *) ((char *)lee + reclen);
2061 if (count == leh->leh_reccount)
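/*
 * Matching sketch: lname carries the encoded digest of the encrypted
 * name as seen by a client without the key; each LinkEA name is decoded
 * with critical_decode() and its LLCRYPT_EXTRACT_DIGEST() is compared
 * to the decoded hash, identifying the on-disk name without decrypting
 * it.
 */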
2074 * The UPDATE lock should be taken against the parent, and be released
2075 * before exit; the child_bits lock should be taken against the child and
2076 * returned back: (1) a normal request should release the child lock;
2077 * (2) an intent request will grant the lock to the client.
2079 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
2080 struct mdt_lock_handle *lhc,
2081 enum mds_ibits_locks child_bits,
2082 struct ldlm_reply *ldlm_rep)
2084 struct ptlrpc_request *req = mdt_info_req(info);
2085 struct mdt_body *reqbody = NULL;
2086 struct mdt_object *parent = info->mti_object;
2087 struct mdt_object *child = NULL;
2088 struct lu_fid *child_fid = &info->mti_tmp_fid1;
2089 struct lu_name *lname = NULL;
2090 struct mdt_lock_handle *lhp = NULL;
2091 struct ldlm_lock *lock;
2092 struct req_capsule *pill = info->mti_pill;
2093 bool fscrypt_md = false;
2094 enum mds_ibits_locks try_bits = MDS_INODELOCK_NONE;
2101 is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
2102 LASSERT(ergo(is_resent,
2103 lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
2108 if (info->mti_mdt->mdt_enable_dir_auto_split)
2109 ma_need |= MA_DIRENT_CNT;
2111 if (CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_PAUSE_GETATTR, cfs_fail_val))
2112 req->rq_pause_after_reply = 1;
2114 if (info->mti_cross_ref) {
2115 /* Only getattr on the child. Parent is on another node. */
2116 mdt_set_disposition(info, ldlm_rep,
2117 DISP_LOOKUP_EXECD | DISP_LOOKUP_POS);
2119 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", ldlm_rep = %p\n",
2120 PFID(mdt_object_fid(child)), ldlm_rep);
2122 rc = mdt_check_resent_lock(info, child, lhc);
2125 } else if (rc > 0) {
2127 * The object's name entry is on another MDS; it will
2128 * request the PERM lock only, because the LOOKUP lock is owned
2129 * by the MDS where the name entry resides.
2131 * TODO: it should try the layout lock too. - Jinshan
2133 child_bits &= ~(MDS_INODELOCK_LOOKUP |
2134 MDS_INODELOCK_LAYOUT);
2135 child_bits |= MDS_INODELOCK_PERM;
2136 rc = mdt_object_lock(info, child, lhc, child_bits,
2142 /* Finally, we can get attr for child. */
2143 if (!mdt_object_exists(child)) {
2144 LU_OBJECT_DEBUG(D_INFO, info->mti_env,
2146 "remote object doesn't exist.");
2147 mdt_object_unlock(info, child, lhc, 1);
2151 rc = mdt_getattr_internal(info, child, ma_need);
2152 if (unlikely(rc != 0)) {
2153 mdt_object_unlock(info, child, lhc, 1);
2157 rc = mdt_pack_secctx_in_reply(info, child);
2159 mdt_object_unlock(info, child, lhc, 1);
2163 rc = mdt_pack_encctx_in_reply(info, child);
2165 mdt_object_unlock(info, child, lhc, 1);
2169 lname = &info->mti_name;
2170 mdt_name_unpack(pill, &RMF_NAME, lname, MNF_FIX_ANON);
2172 if (info->mti_body->mbo_valid & OBD_MD_NAMEHASH) {
2173 reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
2174 if (unlikely(reqbody == NULL))
2175 RETURN(err_serious(-EPROTO));
2177 *child_fid = reqbody->mbo_fid2;
2178 if (unlikely(!fid_is_sane(child_fid)))
2179 RETURN(err_serious(-EINVAL));
2181 if (lu_fid_eq(mdt_object_fid(parent), child_fid)) {
2182 mdt_object_get(info->mti_env, parent);
2185 child = mdt_object_find(info->mti_env, info->mti_mdt,
2188 RETURN(PTR_ERR(child));
2191 CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", ldlm_rep = %p\n",
2192 PFID(mdt_object_fid(parent)),
2193 PFID(&reqbody->mbo_fid2), ldlm_rep);
2194 } else if (lu_name_is_valid(lname)) {
2195 if (mdt_object_remote(parent)) {
2196 CERROR("%s: parent "DFID" is on remote target\n",
2197 mdt_obd_name(info->mti_mdt),
2198 PFID(mdt_object_fid(parent)));
2202 CDEBUG(D_INODE, "getattr with lock for "DFID"/"DNAME", ldlm_rep = %p\n",
2203 PFID(mdt_object_fid(parent)),
2204 encode_fn_luname(lname), ldlm_rep);
2206 if (parent->mot_obj.lo_header->loh_attr & LOHA_FSCRYPT_MD ||
2207 (fid_is_root(mdt_object_fid(parent)) &&
2208 lname->ln_namelen == strlen(dot_fscrypt_name) &&
2209 strncmp(lname->ln_name, dot_fscrypt_name,
2210 lname->ln_namelen) == 0))
2213 reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
2214 if (unlikely(reqbody == NULL))
2215 RETURN(err_serious(-EPROTO));
2217 *child_fid = reqbody->mbo_fid2;
2218 if (unlikely(!fid_is_sane(child_fid)))
2219 RETURN(err_serious(-EINVAL));
2221 if (lu_fid_eq(mdt_object_fid(parent), child_fid)) {
2222 mdt_object_get(info->mti_env, parent);
2225 child = mdt_object_find(info->mti_env, info->mti_mdt,
2228 RETURN(PTR_ERR(child));
2231 if (mdt_object_remote(child)) {
2232 CERROR("%s: child "DFID" is on remote target\n",
2233 mdt_obd_name(info->mti_mdt),
2234 PFID(mdt_object_fid(child)));
2235 GOTO(out_child, rc = -EPROTO);
2238 /* don't fetch the LOOKUP lock if it's a remote object */
2239 rc = mdt_is_remote_object(info, parent, child);
2241 GOTO(out_child, rc);
2243 child_bits &= ~MDS_INODELOCK_LOOKUP;
2245 CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", ldlm_rep = %p\n",
2246 PFID(mdt_object_fid(parent)),
2247 PFID(&reqbody->mbo_fid2), ldlm_rep);
2250 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
2252 if (unlikely(!mdt_object_exists(parent)) &&
2253 !(info->mti_body->mbo_valid & OBD_MD_NAMEHASH) &&
2254 lu_name_is_valid(lname)) {
2255 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
2257 "Parent doesn't exist!");
2258 GOTO(out_child, rc = -ESTALE);
2261 if (!child && is_resent) {
2262 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
2264 /* Lock is pinned by ldlm_handle_enqueue0() as this is
2265 * a resend case; however, it could already have been destroyed
2266 * due to client eviction or a raced cancel RPC.
2268 LDLM_DEBUG_NOLOCK("Invalid lock handle %#llx",
2269 lhc->mlh_reg_lh.cookie);
2272 fid_extract_from_res_name(child_fid,
2273 &lock->l_resource->lr_name);
2274 ldlm_lock_put(lock);
2275 child = mdt_object_find(info->mti_env, info->mti_mdt,
2278 RETURN(PTR_ERR(child));
2279 } else if (!(info->mti_body->mbo_valid & OBD_MD_NAMEHASH) &&
2280 lu_name_is_valid(lname)) {
2281 if (info->mti_body->mbo_valid == OBD_MD_FLID) {
2282 rc = mdt_raw_lookup(info, parent, lname);
2287 /* step 1: lock parent only if parent is a directory */
2288 if (S_ISDIR(lu_object_attr(&parent->mot_obj))) {
2289 lhp = &info->mti_lh[MDT_LH_PARENT];
2290 rc = mdt_parent_lock(info, parent, lhp, lname, LCK_PR);
2291 if (unlikely(rc != 0))
2295 /* step 2: lookup child's fid by name */
2296 fid_zero(child_fid);
2297 rc = mdo_lookup(info->mti_env, mdt_object_child(parent), lname,
2298 child_fid, &info->mti_spec);
2300 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
2303 GOTO(unlock_parent, rc);
2305 child = mdt_object_find(info->mti_env, info->mti_mdt,
2307 if (unlikely(IS_ERR(child)))
2308 GOTO(unlock_parent, rc = PTR_ERR(child));
2311 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
2313 /* step 3: lock child regardless of whether it is local or remote. */
2316 if (info->mti_body->mbo_valid & OBD_MD_NAMEHASH) {
2317 /* Here, lname is an encoded hash of the on-disk name, and the
2318 * client is accessing the file without the encryption key,
2319 * so we need to compare the name hash with the one in the request.
2321 if (!find_name_matching_hash(info, lname, parent,
2323 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
2324 mdt_clear_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
2325 GOTO(out_child, rc = -ENOENT);
2329 CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout * 2);
2330 if (!mdt_object_exists(child)) {
2331 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
2333 "Object doesn't exist!");
2334 GOTO(out_child, rc = -ENOENT);
2337 rc = mdt_check_resent_lock(info, child, lhc);
2339 GOTO(out_child, rc);
2340 } else if (rc > 0) {
2341 bool hardlink_check = lhp && info->mti_batch_env &&
2342 S_ISREG(lu_object_attr(&child->mot_obj));
2344 if ((!(child_bits & MDS_INODELOCK_UPDATE) &&
2345 !mdt_object_remote(child)) || hardlink_check) {
2346 struct md_attr *ma = &info->mti_attr;
2349 ma->ma_need = MA_INODE;
2350 rc = mdt_attr_get_complex(info, child, ma);
2351 if (unlikely(rc != 0))
2352 GOTO(out_child, rc);
2355 * There is a possible deadlock between link() and batch
2356 * stat-ahead on hardlinks:
2357 * link():
2358 * - Take parent DLM lock: mdt_parent_lock PW
2359 * - Take object DLM lock: mdt_object_lock EX
2360 * Batch stat-ahead:
2361 * - Already hold the DLM lock on one link of the
2362 *   object, which was returned to the client by a previous
2363 *   stat operation on the MDT.
2364 * - Take parent DLM lock: mdt_parent_lock PR
2367 * The link operation, which is holding the parent PW
2368 * lock, is waiting for the batch stat-ahead to release
2369 * the DLM lock on one link of the file.
2370 * The batch stat-ahead, which is holding the DLM lock on
2371 * the file from the previous sub stat operation in the
2372 * batch RPC, is currently trying to acquire the PR DLM
2373 * lock on the parent.
2374 * To avoid this deadlock, we simply cancel the
2375 * stat-ahead on the hardlink in a batch RPC.
2376 * Without this fix, lustre-rsync-test/test_6 failed.
2378 if (hardlink_check && (ma->ma_valid & MA_INODE) &&
2379 (ma->ma_attr.la_valid & LA_NLINK) &&
2380 ma->ma_attr.la_nlink > 1)
2381 GOTO(out_child, rc = -ECANCELED);
2383 /* If the file has not been changed for some time, we
2384 * return not only a LOOKUP lock, but also an UPDATE
2385 * lock, and this might save us an RPC on a later STAT. For
2386 * directories, it also lets the negative dentry cache start
2387 * working for this dir.
2389 if (ma->ma_valid & MA_INODE &&
2390 ma->ma_attr.la_valid & LA_CTIME &&
2391 info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
2392 ma->ma_attr.la_ctime < ktime_get_real_seconds())
2393 child_bits |= MDS_INODELOCK_UPDATE;
2396 /* layout lock must be granted in a best-effort way
2397 * for IT operations
2398 */
2399 LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
2400 if (S_ISREG(lu_object_attr(&child->mot_obj)) &&
2401 !mdt_object_remote(child) && ldlm_rep != NULL) {
2402 if (!CFS_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
2403 exp_connect_layout(info->mti_exp)) {
2404 /* try to grant layout lock for regular file. */
2405 try_bits = MDS_INODELOCK_LAYOUT;
2407 /* Acquire DOM lock in advance for data-on-mdt file */
2408 if (child != parent)
2409 try_bits |= MDS_INODELOCK_DOM;
2413 * To avoid a possible deadlock between a batched statahead RPC
2414 * and a rename()/migrate() operation, use trylock to
2415 * obtain the DLM PR ibits lock for file attributes in a
2416 * batched statahead RPC. A failed trylock means that other
2417 * users may be modifying the directory simultaneously, as in the
2418 * current Lustre design the server only grants read locks to a client.
2420 * When a trylock fails, the MDT reports the conflict with
2421 * error code -EBUSY, and stops the statahead immediately.
2423 if (info->mti_batch_env) {
2425 * This is a sub stat-ahead request in a batched RPC.
2426 * However, @child is a remote object, so we just
2427 * return -EREMOTE here to forbid stat-ahead on it.
2429 if (mdt_object_remote(child))
2430 GOTO(out_child, rc = -EREMOTE);
2432 try_bits |= child_bits;
2436 if (try_bits != MDS_INODELOCK_NONE) {
2437 /* try layout lock, it may fail to be granted due to
2438 * contention at LOOKUP or UPDATE
2440 rc = mdt_object_lock_try(info, child, lhc, &child_bits,
2442 if (child_bits & MDS_INODELOCK_LAYOUT)
2445 /* Do not enqueue the UPDATE lock from this MDT (cross-MDT);
2446 * the client will enqueue the lock on the remote MDT
2448 if (mdt_object_remote(child))
2449 rc = mdt_object_lookup_lock(info, NULL, child,
2452 rc = mdt_object_lock(info, child, lhc,
2453 child_bits, LCK_PR);
2455 if (unlikely(rc != 0))
2456 GOTO(out_child, rc);
2457 if (info->mti_batch_env && child_bits == 0) {
2459 mdt_object_unlock(info, child, lhc, 1);
2460 GOTO(out_child, rc = -EBUSY);
2465 child->mot_obj.lo_header->loh_attr |= LOHA_FSCRYPT_MD;
2467 /* finally, we can get attr for child. */
2468 rc = mdt_getattr_internal(info, child, ma_need);
2469 if (unlikely(rc != 0)) {
2471 mdt_object_unlock(info, child, lhc, 1);
2472 GOTO(out_child, rc);
2475 rc = mdt_pack_secctx_in_reply(info, child);
2478 mdt_object_unlock(info, child, lhc, 1);
2479 GOTO(out_child, rc);
2482 rc = mdt_pack_encctx_in_reply(info, child);
2485 mdt_object_unlock(info, child, lhc, 1);
2486 GOTO(out_child, rc);
2489 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
2491 /* Debugging code. */
2492 LDLM_DEBUG(lock, "Returning lock to client");
2493 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
2494 &lock->l_resource->lr_name),
2495 "Lock res_id: "DLDLMRES", fid: "DFID"\n",
2496 PLDLMRES(lock->l_resource),
2497 PFID(mdt_object_fid(child)));
2499 if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_ENQ_RESEND))) {
2500 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
2501 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_ENQ_RESEND,
2503 req->rq_arrival_time.tv_sec +
2505 /* Put the lock on the waiting list and force the cancel */
2506 lock->l_flags |= LDLM_FL_AST_SENT;
2510 * check whether the object is remote, as we can't
2511 * really check attributes without an explicit check for
2512 * the object's existence first.
2514 if (!mdt_object_remote(child) && child != parent &&
2515 S_ISREG(lu_object_attr(&child->mot_obj))) {
2516 mdt_object_put(info->mti_env, child);
2517 rc = mdt_pack_size2body(info, child_fid,
2519 if (rc != 0 && child_bits & MDS_INODELOCK_DOM) {
2520 /* DOM lock was taken in advance but this is
2521 * not a DoM file. Drop the lock.
2523 lock_res_and_lock(lock);
2524 ldlm_inodebits_drop(lock, MDS_INODELOCK_DOM);
2525 unlock_res_and_lock(lock);
2527 ldlm_lock_put(lock);
2528 GOTO(unlock_parent, rc = 0);
2530 ldlm_lock_put(lock);
2536 mdt_object_put(info->mti_env, child);
2539 mdt_object_unlock(info, parent, lhp, 1);
2540 if (rc == -ENOENT) {
2541 /* return -ENOKEY instead of -ENOENT to encryption-unaware
2542 * client trying to access an encrypted file
2544 int rc2 = mdt_check_enc(info, parent);
2552 /* normal handler: should release the child lock */
2553 static int mdt_getattr_name(struct tgt_session_info *tsi)
2555 struct mdt_thread_info *info = tsi2mdt_info(tsi);
2556 struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
2557 struct mdt_body *reqbody;
2558 struct mdt_body *repbody;
2563 reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
2564 LASSERT(reqbody != NULL);
2565 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
2566 LASSERT(repbody != NULL);
2568 info->mti_cross_ref = !!(reqbody->mbo_valid & OBD_MD_FLCROSSREF);
2569 repbody->mbo_eadatasize = 0;
2570 repbody->mbo_aclsize = 0;
2572 rc = mdt_init_ucred(info, reqbody);
2574 GOTO(out_shrink, rc);
2576 rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
2577 if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
2578 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
2579 lhc->mlh_reg_lh.cookie = 0;
2581 mdt_exit_ucred(info);
2584 mdt_client_compatibility(info);
2585 rc2 = mdt_fix_reply(info);
2588 mdt_thread_info_fini(info);
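/* Remove one name entry pointing at @obj from the parent directory @pfid:
 * take the parent lock and the child LOOKUP|UPDATE locks, verify that the
 * name still resolves to @obj, then unlink it with the given ctime.
 * Objects that are still open are skipped with -EBUSY.
 */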
2592 static int mdt_rmfid_unlink(struct mdt_thread_info *info,
2593 const struct lu_fid *pfid,
2594 const struct lu_name *name,
2595 struct mdt_object *obj, s64 ctime)
2597 struct lu_fid *child_fid = &info->mti_tmp_fid1;
2598 struct ldlm_enqueue_info *einfo = &info->mti_einfo;
2599 struct mdt_device *mdt = info->mti_mdt;
2600 struct md_attr *ma = &info->mti_attr;
2601 struct mdt_lock_handle *parent_lh;
2602 struct mdt_lock_handle *child_lh;
2603 struct mdt_object *pobj;
2608 pobj = mdt_object_find(info->mti_env, mdt, pfid);
2610 GOTO(out, rc = PTR_ERR(pobj));
2612 parent_lh = &info->mti_lh[MDT_LH_PARENT];
2613 rc = mdt_parent_lock(info, pobj, parent_lh, name, LCK_PW);
2615 GOTO(put_parent, rc);
2617 rc = mdo_lookup(info->mti_env, mdt_object_child(pobj),
2618 name, child_fid, &info->mti_spec);
2620 GOTO(unlock_parent, rc);
2622 if (!lu_fid_eq(child_fid, mdt_object_fid(obj)))
2623 GOTO(unlock_parent, rc = -EREMCHG);
2625 child_lh = &info->mti_lh[MDT_LH_CHILD];
2626 rc = mdt_object_stripes_lock(info, pobj, obj, child_lh, einfo,
2627 MDS_INODELOCK_LOOKUP |
2628 MDS_INODELOCK_UPDATE, LCK_EX);
2630 GOTO(unlock_parent, rc);
2632 if (atomic_read(&obj->mot_open_count)) {
2633 CDEBUG(D_OTHER, "object "DFID" open, skip\n",
2634 PFID(mdt_object_fid(obj)));
2635 GOTO(unlock_child, rc = -EBUSY);
2639 ma->ma_valid = MA_INODE;
2640 ma->ma_attr.la_valid = LA_CTIME;
2641 ma->ma_attr.la_ctime = ctime;
2643 mutex_lock(&obj->mot_lov_mutex);
2645 rc = mdo_unlink(info->mti_env, mdt_object_child(pobj),
2646 mdt_object_child(obj), name, ma, 0);
2648 mutex_unlock(&obj->mot_lov_mutex);
2651 mdt_object_stripes_unlock(info, obj, child_lh, einfo, 1);
2653 mdt_object_unlock(info, pobj, parent_lh, 1);
2655 mdt_object_put(info->mti_env, pobj);
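/* Check whether the current user may remove @obj by FID: immutable objects
 * are refused; rbac byfid_ops and CAP_DAC_OVERRIDE take precedence over the
 * owner/group/other write-permission checks below.
 */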
2660 static int mdt_rmfid_check_permission(struct mdt_thread_info *info,
2661 struct mdt_object *obj)
2663 struct lu_ucred *uc = lu_ucred(info->mti_env);
2664 struct md_attr *ma = &info->mti_attr;
2665 struct lu_attr *la = &ma->ma_attr;
2670 ma->ma_need = MA_INODE;
2671 rc = mo_attr_get(info->mti_env, mdt_object_child(obj), ma);
2675 if (la->la_flags & LUSTRE_IMMUTABLE_FL)
2678 /* we want rbac roles to have precedence over any other
2679 * permission or capability checks
2681 if (!uc->uc_rbac_byfid_ops)
2683 if (cap_raised(uc->uc_cap, CAP_DAC_OVERRIDE))
2685 if (uc->uc_fsuid == la->la_uid) {
2686 if ((la->la_mode & 0200) == 0)
2688 } else if (uc->uc_fsgid == la->la_gid) {
2689 if ((la->la_mode & 0020) == 0)
2691 } else if ((la->la_mode & 0002) == 0) {
2699 static int mdt_rmfid_one(struct mdt_thread_info *info, struct lu_fid *fid,
2702 struct mdt_device *mdt = info->mti_mdt;
2703 struct mdt_object *obj = NULL;
2704 struct linkea_data ldata = { NULL };
2705 struct lu_buf *buf = &info->mti_big_buf;
2706 struct lu_name *name = &info->mti_name;
2707 struct lu_fid *pfid = &info->mti_tmp_fid1;
2708 struct link_ea_header *leh;
2709 struct link_ea_entry *lee;
2710 int reclen, count, rc = 0;
2714 if (!fid_is_sane(fid))
2715 GOTO(out, rc = -EINVAL);
2717 if (!fid_is_namespace_visible(fid))
2718 GOTO(out, rc = -EINVAL);
2720 obj = mdt_object_find(info->mti_env, mdt, fid);
2722 GOTO(out, rc = PTR_ERR(obj));
2724 if (mdt_object_remote(obj))
2725 GOTO(out, rc = -EREMOTE);
2726 if (!mdt_object_exists(obj) || lu_object_is_dying(&obj->mot_header))
2727 GOTO(out, rc = -ENOENT);
2729 rc = mdt_rmfid_check_permission(info, obj);
2734 buf = lu_buf_check_and_alloc(buf, PATH_MAX);
2736 GOTO(out, rc = -ENOMEM);
2739 rc = mdt_links_read(info, obj, &ldata);
2744 lee = (struct link_ea_entry *)(leh + 1);
2745 for (count = 0; count < leh->leh_reccount; count++) {
2746 /* remove every hardlink */
2747 linkea_entry_unpack(lee, &reclen, name, pfid);
2748 lee = (struct link_ea_entry *) ((char *)lee + reclen);
2749 rc = mdt_rmfid_unlink(info, pfid, name, obj, ctime);
2755 if (obj && !IS_ERR(obj))
2756 mdt_object_put(info->mti_env, obj);
2757 if (info->mti_big_buf.lb_buf)
2758 lu_buf_free(&info->mti_big_buf);
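/* Handle the MDS_RMFID request: for every FID in the RMF_FID_ARRAY buffer,
 * remove all of its name entries via mdt_rmfid_one() and return one status
 * per FID in the RMF_RCS reply buffer.
 */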
2763 static int mdt_rmfid(struct tgt_session_info *tsi)
2765 struct mdt_thread_info *mti = tsi2mdt_info(tsi);
2766 struct mdt_body *reqbody;
2767 struct lu_fid *fids, *rfids;
2774 reqbody = req_capsule_client_get(tsi->tsi_pill, &RMF_MDT_BODY);
2775 if (reqbody == NULL)
2777 bufsize = req_capsule_get_size(tsi->tsi_pill, &RMF_FID_ARRAY,
2779 nr = bufsize / sizeof(struct lu_fid);
2780 if (nr * sizeof(struct lu_fid) != bufsize)
2782 req_capsule_set_size(tsi->tsi_pill, &RMF_RCS,
2783 RCL_SERVER, nr * sizeof(__u32));
2784 req_capsule_set_size(tsi->tsi_pill, &RMF_FID_ARRAY,
2785 RCL_SERVER, nr * sizeof(struct lu_fid));
2786 rc = req_capsule_server_pack(tsi->tsi_pill);
2788 GOTO(out, rc = err_serious(rc));
2789 fids = req_capsule_client_get(tsi->tsi_pill, &RMF_FID_ARRAY);
2792 rcs = req_capsule_server_get(tsi->tsi_pill, &RMF_RCS);
2794 rfids = req_capsule_server_get(tsi->tsi_pill, &RMF_FID_ARRAY);
2797 mdt_init_ucred(mti, reqbody);
2798 for (i = 0; i < nr; i++) {
2800 rcs[i] = mdt_rmfid_one(mti, fids + i, reqbody->mbo_ctime);
2802 mdt_exit_ucred(mti);
2808 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2809 void *karg, void __user *uarg);
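/* MDS set_info handler on the IO path: only the KEY_GRANT_SHRINK key is
 * supported, which is handled like a read request via
 * tgt_grant_prepare_read().
 */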
2811 int mdt_io_set_info(struct tgt_session_info *tsi)
2813 struct ptlrpc_request *req = tgt_ses_req(tsi);
2814 struct ost_body *body = NULL, *repbody;
2815 bool is_grant_shrink;
2821 key = req_capsule_client_get(tsi->tsi_pill, &RMF_SETINFO_KEY);
2823 DEBUG_REQ(D_HA, req, "no set_info key");
2824 RETURN(err_serious(-EFAULT));
2826 keylen = req_capsule_get_size(tsi->tsi_pill, &RMF_SETINFO_KEY,
2829 is_grant_shrink = KEY_IS(KEY_GRANT_SHRINK);
2830 if (is_grant_shrink)
2831 /* In this case the value is actually an RMF_OST_BODY, so we
2832 * transmute the type of this PTLRPC
2834 req_capsule_extend(tsi->tsi_pill, &RQF_OST_SET_GRANT_INFO);
2836 rc = req_capsule_server_pack(tsi->tsi_pill);
2840 if (is_grant_shrink) {
2841 body = req_capsule_client_get(tsi->tsi_pill, &RMF_OST_BODY);
2843 repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
2846 /** handle grant shrink, similar to a read request */
2847 tgt_grant_prepare_read(tsi->tsi_env, tsi->tsi_exp,
2850 CERROR("%s: Unsupported key %s\n",
2851 tgt_name(tsi->tsi_tgt), (char *)key);
2859 static int mdt_set_info(struct tgt_session_info *tsi)
2861 struct ptlrpc_request *req = tgt_ses_req(tsi);
2864 int keylen, vallen, rc = 0;
2868 key = req_capsule_client_get(tsi->tsi_pill, &RMF_SETINFO_KEY);
2870 DEBUG_REQ(D_HA, req, "no set_info key");
2871 RETURN(err_serious(-EFAULT));
2874 keylen = req_capsule_get_size(tsi->tsi_pill, &RMF_SETINFO_KEY,
2877 val = req_capsule_client_get(tsi->tsi_pill, &RMF_SETINFO_VAL);
2879 DEBUG_REQ(D_HA, req, "no set_info val");
2880 RETURN(err_serious(-EFAULT));
2883 vallen = req_capsule_get_size(tsi->tsi_pill, &RMF_SETINFO_VAL,
2886 /* Swab any part of val you need to here */
2887 if (KEY_IS(KEY_READ_ONLY)) {
2888 /* If client wants rw, make sure nodemap does not enforce ro. */
2889 if (!*(__u32 *)val) {
2890 struct lu_nodemap *nm = NULL;
2891 bool readonly = false;
2894 nm = nodemap_get_from_exp(req->rq_export);
2896 if (!IS_ERR_OR_NULL(nm)) {
2897 readonly = nm->nmf_readonly_mount;
2901 if (unlikely(readonly))
2904 spin_lock(&req->rq_export->exp_lock);
2906 *exp_connect_flags_ptr(req->rq_export) |=
2909 *exp_connect_flags_ptr(req->rq_export) &=
2910 ~OBD_CONNECT_RDONLY;
2911 spin_unlock(&req->rq_export->exp_lock);
2912 } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
2913 struct changelog_setinfo *cs = val;
2915 if (vallen != sizeof(*cs)) {
2916 CERROR("%s: bad changelog_clear setinfo size %d\n",
2917 tgt_name(tsi->tsi_tgt), vallen);
2920 if (req_capsule_req_need_swab(&req->rq_pill)) {
2921 __swab64s(&cs->cs_recno);
2922 __swab32s(&cs->cs_id);
2925 if (!mdt_changelog_allow(tsi2mdt_info(tsi)))
2927 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, req->rq_export,
2929 } else if (KEY_IS(KEY_EVICT_BY_NID)) {
2931 obd_export_evict_by_nid(req->rq_export->exp_obd, val);
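/* MDS_READPAGE handler: allocate temporary pages, let the lower layers fill
 * them with directory data via mo_readpage(), and send them to the client
 * with tgt_sendpage().
 */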
2938 static int mdt_readpage(struct tgt_session_info *tsi)
2940 struct mdt_thread_info *info = mdt_th_info(tsi->tsi_env);
2941 struct mdt_object *object = mdt_obj(tsi->tsi_corpus);
2942 struct lu_rdpg *rdpg = &info->mti_u.rdpg.mti_rdpg;
2943 const struct mdt_body *reqbody = tsi->tsi_mdt_body;
2944 struct mdt_body *repbody;
2950 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
2951 RETURN(err_serious(-ENOMEM));
2953 repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_MDT_BODY);
2954 if (repbody == NULL || reqbody == NULL)
2955 RETURN(err_serious(-EFAULT));
2958 * Prepare @rdpg before calling lower layers and transferring itself. Here
2959 * reqbody->size contains the offset from where to start reading and
2960 * reqbody->nlink contains the number of bytes to read.
2962 rdpg->rp_hash = reqbody->mbo_size;
2963 if (rdpg->rp_hash != reqbody->mbo_size) {
2964 CERROR("Invalid hash: %#llx != %#llx\n",
2965 rdpg->rp_hash, reqbody->mbo_size);
2969 rdpg->rp_attrs = reqbody->mbo_mode;
2970 if (exp_connect_flags(tsi->tsi_exp) & OBD_CONNECT_64BITHASH)
2971 rdpg->rp_attrs |= LUDA_64BITHASH;
2972 rdpg->rp_count = min_t(unsigned int, reqbody->mbo_nlink,
2973 exp_max_brw_size(tsi->tsi_exp));
2974 rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >>
2976 OBD_ALLOC_PTR_ARRAY_LARGE(rdpg->rp_pages, rdpg->rp_npages);
2977 if (rdpg->rp_pages == NULL)
2980 for (i = 0; i < rdpg->rp_npages; ++i) {
2981 rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
2982 if (rdpg->rp_pages[i] == NULL)
2983 GOTO(free_rdpg, rc = -ENOMEM);
2986 /* call lower layers to fill allocated pages with directory data */
2987 rc = mo_readpage(tsi->tsi_env, mdt_object_child(object), rdpg);
2989 GOTO(free_rdpg, rc);
2991 /* send pages to client */
2992 rc = tgt_sendpage(tsi, rdpg, rc);
2997 for (i = 0; i < rdpg->rp_npages; i++)
2998 if (rdpg->rp_pages[i] != NULL)
2999 __free_page(rdpg->rp_pages[i]);
3000 OBD_FREE_PTR_ARRAY_LARGE(rdpg->rp_pages, rdpg->rp_npages);
3002 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
3008 static int mdt_fix_attr_ucred(struct mdt_thread_info *info, __u32 op)
3010 struct lu_ucred *uc = mdt_ucred_check(info);
3011 struct lu_attr *attr = &info->mti_attr.ma_attr;
3016 if (op != REINT_SETATTR) {
3017 if ((attr->la_valid & LA_UID) && (attr->la_uid != -1))
3018 attr->la_uid = uc->uc_fsuid;
3019 /* for S_ISGID, inherit the gid from the parent; such work will be
3020 * done in the cmm/mdd layer, so here set all cases to uc->uc_fsgid.
3022 if ((attr->la_valid & LA_GID) && (attr->la_gid != -1))
3023 attr->la_gid = uc->uc_fsgid;
3029 static inline bool mdt_is_readonly_open(struct mdt_thread_info *info, __u32 op)
3031 return op == REINT_OPEN &&
3032 !(info->mti_spec.sp_cr_flags & (MDS_FMODE_WRITE | MDS_OPEN_CREAT));
3035 static void mdt_preset_secctx_size(struct mdt_thread_info *info)
3037 struct req_capsule *pill = info->mti_pill;
3039 if (req_capsule_has_field(pill, &RMF_FILE_SECCTX,
3041 req_capsule_has_field(pill, &RMF_FILE_SECCTX_NAME,
3043 if (req_capsule_get_size(pill, &RMF_FILE_SECCTX_NAME,
3045 /* pre-set size in server part with max size */
3046 req_capsule_set_size(pill, &RMF_FILE_SECCTX,
3048 req_capsule_ptlreq(pill) ?
3049 OBD_MAX_DEFAULT_EA_SIZE :
3052 req_capsule_set_size(pill, &RMF_FILE_SECCTX,
3057 int mdt_object_striped(struct mdt_thread_info *mti, struct mdt_object *obj)
3059 struct lu_device *bottom_dev;
3060 struct lu_object *bottom_obj;
3063 if (!S_ISDIR(obj->mot_header.loh_attr))
3066 /* getxattr from bottom obj to avoid reading in shard FIDs */
3067 bottom_dev = dt2lu_dev(mti->mti_mdt->mdt_bottom);
3068 bottom_obj = lu_object_find_slice(mti->mti_env, bottom_dev,
3069 mdt_object_fid(obj), NULL);
3070 if (IS_ERR(bottom_obj))
3071 return PTR_ERR(bottom_obj);
3073 rc = dt_xattr_get(mti->mti_env, lu2dt(bottom_obj), &LU_BUF_NULL,
3075 lu_object_put(mti->mti_env, bottom_obj);
3077 return (rc > 0) ? 1 : (rc == -ENODATA) ? 0 : rc;
3080 #define DIR_READ_ON_OPEN_PAGES 1
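/* Read the beginning of a directory at open time into the RMF_NIOBUF_INLINE
 * reply buffer, so the client can prime its readdir cache. Only done when a
 * reply is wanted, an UPDATE lock is being returned, and the directory is
 * local and not striped.
 */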
3082 static int mdt_dir_read_on_open(struct mdt_thread_info *info,
3083 struct lustre_handle *lhc)
3085 const struct lu_env *env = info->mti_env;
3086 struct lu_rdpg *rdpg = &info->mti_u.rdpg.mti_rdpg;
3087 struct req_capsule *pill = info->mti_pill;
3089 struct mdt_body *mbo;
3090 struct mdt_device *mdt = info->mti_mdt;
3091 struct mdt_object *o;
3092 struct ptlrpc_request *req = pill->rc_req;
3093 bool have_lock = false;
3094 struct lu_fid *fid; /* dir fid */
3098 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
3099 GOTO(out_err, rc = -ENOMEM);
3101 /* client doesn't want a reply */
3102 if (!req->rq_reqmsg->lm_repsize)
3105 if (lustre_handle_is_used(lhc)) {
3106 struct ldlm_lock *lock;
3108 lock = ldlm_handle2lock(lhc);
3110 have_lock = ldlm_has_update(lock);
3111 ldlm_lock_put(lock);
3115 GOTO(out_err, rc = 0);
3118 rdpg->rp_attrs = LUDA_FID | LUDA_TYPE;
3119 if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
3120 rdpg->rp_attrs |= LUDA_64BITHASH;
3121 rdpg->rp_count = min_t(unsigned int, req->rq_reqmsg->lm_repsize,
3122 DIR_READ_ON_OPEN_PAGES << PAGE_SHIFT);
3123 rdpg->rp_npages = 0;
3125 rc = req_capsule_server_grow(pill, &RMF_NIOBUF_INLINE, rdpg->rp_count);
3127 /* failed to grow data buffer, just exit */
3128 GOTO(out_err, rc = -E2BIG);
3131 /* re-take MDT_BODY and NIOBUF_INLINE buffers after the buffer grow */
3132 mbo = req_capsule_server_get(pill, &RMF_MDT_BODY);
3133 fid = &mbo->mbo_fid1;
3134 if (!fid_is_sane(fid))
3135 GOTO(out_rnb, rc = -EINVAL);
3137 rdpg->rp_data = req_capsule_server_get(pill, &RMF_NIOBUF_INLINE);
3138 if (rdpg->rp_data == NULL)
3139 GOTO(out_rnb, rc = -EPROTO);
3141 o = mdt_object_find(info->mti_env, mdt, fid);
3143 GOTO(out_rnb, rc = PTR_ERR(o));
3145 if (!mdt_object_exists(o) ||
3146 mdt_object_remote(o) ||
3147 mdt_object_striped(info, o))
3148 GOTO(out_put, rc = -ENOENT);
3150 /* call lower layers to fill allocated pages with directory data */
3151 rc = mo_readpage(env, mdt_object_child(o), rdpg);
3153 mdt_object_put(env, o);
3157 req_capsule_shrink(pill, &RMF_NIOBUF_INLINE, 0, RCL_SERVER);
3160 CDEBUG(D_INFO, "read dir on open failed with rc = %d\n", rc);
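/* Data-on-MDT optimization: if the client reserved reply space, return file
 * data (regular files) or the first directory pages (directories) inline in
 * the open reply.
 */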
3164 static int mdt_read_inline(struct mdt_thread_info *info,
3165 struct mdt_lock_handle *lhc)
3167 struct req_capsule *pill = info->mti_pill;
3168 struct md_attr *ma = &info->mti_attr;
3169 struct lu_attr *la = &ma->ma_attr;
3170 struct ptlrpc_request *req = pill->rc_req;
3174 if (!req_capsule_field_present(pill, &RMF_NIOBUF_INLINE, RCL_SERVER)) {
3175 /* There are no reply buffers for this field, which means that
3176 * the client has no support for data in the reply.
3180 /* client doesn't want a reply */
3181 if (!req->rq_reqmsg->lm_repsize)
3184 if (S_ISREG(la->la_mode))
3185 rc = mdt_dom_read_on_open(info, info->mti_mdt,
3187 else if (S_ISDIR(la->la_mode))
3188 rc = mdt_dir_read_on_open(info, &lhc->mlh_reg_lh);
3193 static int mdt_reint_internal(struct mdt_thread_info *info,
3194 struct mdt_lock_handle *lhc,
3197 struct req_capsule *pill = info->mti_pill;
3198 struct mdt_body *repbody;
3203 rc = mdt_reint_unpack(info, op);
3205 CERROR("Can't unpack reint, rc %d\n", rc);
3206 RETURN(err_serious(rc));
3210 /* check if the file system is set to readonly. O_RDONLY open
3211 * is still allowed even if the file system is set to read-only mode
3213 if (mdt_rdonly(info->mti_exp) && !mdt_is_readonly_open(info, op))
3214 RETURN(err_serious(-EROFS));
3216 /* for replay (no_create) lmm is not needed, client has it already */
3217 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
3218 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
3221 /* llog cookies are always 0, the field is kept for compatibility */
3222 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
3223 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
3225 /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD
3226 * by default. If the target object has more ACL entries, then
3227 * enlarge the buffer when necessary.
3229 if (req_capsule_has_field(pill, &RMF_ACL, RCL_SERVER))
3230 req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER,
3231 LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
3233 mdt_preset_secctx_size(info);
3234 mdt_preset_encctx_size(info);
3236 rc = req_capsule_server_pack(pill);
3238 CERROR("Can't pack response, rc %d\n", rc);
3239 RETURN(err_serious(rc));
3242 if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
3243 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
3245 repbody->mbo_eadatasize = 0;
3246 repbody->mbo_aclsize = 0;
3249 CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
3251 /* for replay no cookie / lmm needed, because the client has them already */
3252 if (info->mti_spec.no_create)
3253 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
3254 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
3256 rc = mdt_init_ucred_reint(info);
3258 GOTO(out_shrink, rc);
3260 rc = mdt_fix_attr_ucred(info, op);
3262 GOTO(out_ucred, rc = err_serious(rc));
3264 rc = mdt_check_resent(info, mdt_reconstruct, lhc);
3266 GOTO(out_ucred, rc);
3267 } else if (rc == 1) {
3268 DEBUG_REQ(D_INODE, mdt_info_req(info), "resent opt");
3269 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
3270 GOTO(out_ucred, rc);
3272 rc = mdt_reint_rec(info, lhc);
3275 mdt_exit_ucred(info);
3277 mdt_client_compatibility(info);
3279 rc2 = mdt_fix_reply(info);
3284 * Data-on-MDT optimization - read data along with OPEN and return it
3285 * in reply when possible.
3287 if (rc == 0 && op == REINT_OPEN && !req_is_replay(pill->rc_req))
3288 rc = mdt_read_inline(info, lhc);
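/* Extract the reint opcode from the request and extend the request capsule
 * with the matching format; unknown or unsupported opcodes return
 * err_serious(-EFAULT).
 */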
3293 static long mdt_reint_opcode(struct ptlrpc_request *req,
3294 const struct req_format **fmt)
3296 struct mdt_device *mdt;
3297 struct mdt_rec_reint *rec;
3300 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
3302 opc = rec->rr_opcode;
3303 DEBUG_REQ(D_INODE, req, "reint opt = %ld", opc);
3304 if (opc < REINT_MAX && fmt[opc] != NULL)
3305 req_capsule_extend(&req->rq_pill, fmt[opc]);
3307 mdt = mdt_exp2dev(req->rq_export);
3308 CERROR("%s: Unsupported opcode '%ld' from client '%s': rc = %d\n",
3309 req->rq_export->exp_obd->obd_name,
3310 opc, mdt->mdt_ldlm_client->cli_name, -EFAULT);
3311 opc = err_serious(-EFAULT);
3314 opc = err_serious(-EFAULT);
3319 static int mdt_reint(struct tgt_session_info *tsi)
3323 static const struct req_format *reint_fmts[REINT_MAX] = {
3324 [REINT_SETATTR] = &RQF_MDS_REINT_SETATTR,
3325 [REINT_CREATE] = &RQF_MDS_REINT_CREATE,
3326 [REINT_LINK] = &RQF_MDS_REINT_LINK,
3327 [REINT_UNLINK] = &RQF_MDS_REINT_UNLINK,
3328 [REINT_RENAME] = &RQF_MDS_REINT_RENAME,
3329 [REINT_OPEN] = &RQF_MDS_REINT_OPEN,
3330 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
3331 [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK,
3332 [REINT_MIGRATE] = &RQF_MDS_REINT_MIGRATE,
3333 [REINT_RESYNC] = &RQF_MDS_REINT_RESYNC,
3338 opc = mdt_reint_opcode(tgt_ses_req(tsi), reint_fmts);
3340 struct mdt_thread_info *info = tsi2mdt_info(tsi);
3342 * No lock is possible here from the client to pass to the reint code
3345 rc = mdt_reint_internal(info, NULL, opc);
3346 mdt_thread_info_fini(info);
3351 tsi->tsi_reply_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
3355 /* this should sync the whole device */
3356 int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
3358 struct dt_device *dt = mdt->mdt_bottom;
3363 rc = dt->dd_ops->dt_sync(env, dt);
3367 /* this should sync this object */
3368 static int mdt_object_sync(const struct lu_env *env, struct obd_export *exp,
3369 struct mdt_object *mo)
3375 if (!mdt_object_exists(mo)) {
3376 CWARN("%s: non existing object "DFID": rc = %d\n",
3377 exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)),
3382 if (S_ISREG(lu_object_attr(&mo->mot_obj))) {
3383 struct lu_target *tgt = tgt_ses_info(env)->tsi_tgt;
3384 dt_obj_version_t version;
3386 version = dt_version_get(env, mdt_obj2dt(mo));
3387 if (version > tgt->lut_obd->obd_last_committed)
3388 rc = mo_object_sync(env, mdt_object_child(mo));
3390 rc = mo_object_sync(env, mdt_object_child(mo));
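/* MDS_SYNC handler: sync the whole device if no FID is given, otherwise
 * sync the single object and return its attributes.
 */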
3396 static int mdt_sync(struct tgt_session_info *tsi)
3398 struct ptlrpc_request *req = tgt_ses_req(tsi);
3399 struct req_capsule *pill = tsi->tsi_pill;
3400 struct mdt_body *body;
3401 ktime_t kstart = ktime_get();
3406 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
3407 RETURN(err_serious(-ENOMEM));
3409 if (fid_seq(&tsi->tsi_mdt_body->mbo_fid1) == 0) {
3410 rc = mdt_device_sync(tsi->tsi_env, mdt_exp2dev(tsi->tsi_exp));
3412 struct mdt_thread_info *info = tsi2mdt_info(tsi);
3414 if (unlikely(info->mti_object == NULL))
3417 /* sync an object */
3418 rc = mdt_object_sync(tsi->tsi_env, tsi->tsi_exp,
3421 const struct lu_fid *fid;
3422 struct lu_attr *la = &info->mti_attr.ma_attr;
3424 info->mti_attr.ma_need = MA_INODE;
3425 info->mti_attr.ma_valid = 0;
3426 rc = mdt_attr_get_complex(info, info->mti_object,
3429 body = req_capsule_server_get(pill,
3431 fid = mdt_object_fid(info->mti_object);
3432 mdt_pack_attr2body(info, body, la, fid);
3435 mdt_thread_info_fini(info);
3438 mdt_counter_incr(req, LPROC_MDT_SYNC,
3439 ktime_us_delta(ktime_get(), kstart));
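/* Sync a single Data-on-MDT object on behalf of an OST_SYNC-style request;
 * a zero FID is a no-op since a full device sync is done via MDS_SYNC.
 */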
3444 static int mdt_data_sync(struct tgt_session_info *tsi)
3446 struct mdt_thread_info *info;
3447 struct mdt_device *mdt = mdt_exp2dev(tsi->tsi_exp);
3448 struct ost_body *body = tsi->tsi_ost_body;
3449 struct ost_body *repbody;
3450 struct mdt_object *mo = NULL;
3456 repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
3458 /* device sync is done via MDS_SYNC. NOOP if no fid is specified */
3459 if (fid_is_zero(&tsi->tsi_fid))
3462 mo = mdt_object_find(tsi->tsi_env, mdt, &tsi->tsi_fid);
3464 RETURN(PTR_ERR(mo));
3466 rc = mdt_object_sync(tsi->tsi_env, tsi->tsi_exp, mo);
3470 repbody->oa.o_oi = body->oa.o_oi;
3471 repbody->oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
3473 info = tsi2mdt_info(tsi);
3474 ma = &info->mti_attr;
3475 ma->ma_need = MA_INODE;
3477 rc = mdt_attr_get_complex(info, mo, ma);
3479 obdo_from_la(&repbody->oa, &ma->ma_attr, VALID_FLAGS);
3482 mdt_thread_info_fini(info);
3487 mdt_object_put(tsi->tsi_env, mo);
3491 /* To get default quotas the ID needs to be 0, so
3492 * there is no reason to swap it according to the nodemap.
3494 static inline bool qmt_need_swap(__u32 cmd)
3496 if (cmd == LUSTRE_Q_GETDEFAULT || cmd == LUSTRE_Q_GETDEFAULT_POOL)
3503 * Handle quota control requests to consult current usage/limit, but also
3504 * to configure quota enforcement
3506 static int mdt_quotactl(struct tgt_session_info *tsi)
3508 struct obd_export *exp = tsi->tsi_exp;
3509 struct req_capsule *pill = tsi->tsi_pill;
3510 struct obd_quotactl *oqctl, *repoqc;
3511 struct mdt_device *mdt = mdt_exp2dev(exp);
3512 struct lu_device *qmt = mdt->mdt_qmt_dev;
3513 struct lu_nodemap *nodemap;
3514 char *buffer = NULL;
3519 oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
3521 RETURN(err_serious(-EPROTO));
3523 if (oqctl->qc_cmd == LUSTRE_Q_ITERQUOTA ||
3524 oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA)
3525 req_capsule_set_size(pill, &RMF_OBD_QUOTA_ITER, RCL_SERVER,
3526 LQUOTA_ITER_BUFLEN);
3528 req_capsule_set_size(pill, &RMF_OBD_QUOTA_ITER, RCL_SERVER, 0);
3530 rc = req_capsule_server_pack(pill);
3532 RETURN(err_serious(rc));
3534 nodemap = nodemap_get_from_exp(exp);
3535 if (IS_ERR(nodemap))
3536 RETURN(PTR_ERR(nodemap));
3538 switch (oqctl->qc_cmd) {
3539 /* master quotactl */
3542 case LUSTRE_Q_SETDEFAULT:
3543 case LUSTRE_Q_SETQUOTAPOOL:
3544 case LUSTRE_Q_SETINFOPOOL:
3545 case LUSTRE_Q_SETDEFAULT_POOL:
3546 case LUSTRE_Q_DELETEQID:
3547 case LUSTRE_Q_RESETQID:
3548 if (!nodemap_can_setquota(nodemap, oqctl->qc_type,
3550 GOTO(out_nodemap, rc = -EPERM);
3554 case LUSTRE_Q_GETDEFAULT:
3555 case LUSTRE_Q_GETQUOTAPOOL:
3556 case LUSTRE_Q_GETINFOPOOL:
3557 case LUSTRE_Q_GETDEFAULT_POOL:
3558 case LUSTRE_Q_ITERQUOTA:
3560 GOTO(out_nodemap, rc = -EOPNOTSUPP);
3561 /* slave quotactl */
3565 case LUSTRE_Q_ITEROQUOTA:
3569 CERROR("%s: unsupported quotactl command %d: rc = %d\n",
3570 mdt_obd_name(mdt), oqctl->qc_cmd, rc);
3571 GOTO(out_nodemap, rc);
3575 switch (oqctl->qc_type) {
3577 id = nodemap_map_id(nodemap, NODEMAP_UID,
3578 NODEMAP_CLIENT_TO_FS, id);
3581 id = nodemap_map_id(nodemap, NODEMAP_GID,
3582 NODEMAP_CLIENT_TO_FS, id);
3585 id = nodemap_map_id(nodemap, NODEMAP_PROJID,
3586 NODEMAP_CLIENT_TO_FS, id);
3589 GOTO(out_nodemap, rc = -EOPNOTSUPP);
3591 repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
3593 GOTO(out_nodemap, rc = err_serious(-EFAULT));
3595 if (oqctl->qc_cmd == LUSTRE_Q_ITERQUOTA ||
3596 oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA) {
3597 buffer = req_capsule_server_get(pill, &RMF_OBD_QUOTA_ITER);
3599 GOTO(out_nodemap, rc = err_serious(-EFAULT));
3602 if (oqctl->qc_cmd == Q_SETINFO || oqctl->qc_cmd == Q_SETQUOTA)
3603 barrier_exit(tsi->tsi_tgt->lut_bottom);
3605 if (oqctl->qc_id != id && qmt_need_swap(oqctl->qc_cmd))
3606 swap(oqctl->qc_id, id);
3608 if (oqctl->qc_cmd == Q_SETINFO || oqctl->qc_cmd == Q_SETQUOTA) {
3609 if (unlikely(!barrier_entry(tsi->tsi_tgt->lut_bottom)))
3610 GOTO(out_nodemap, rc = -EINPROGRESS);
3613 switch (oqctl->qc_cmd) {
3615 case LUSTRE_Q_ITERQUOTA:
3616 rc = lquota_iter_change_qid(nodemap, oqctl);
3618 GOTO(out_nodemap, rc);
3624 case LUSTRE_Q_SETDEFAULT:
3625 case LUSTRE_Q_GETDEFAULT:
3626 case LUSTRE_Q_SETQUOTAPOOL:
3627 case LUSTRE_Q_GETQUOTAPOOL:
3628 case LUSTRE_Q_SETINFOPOOL:
3629 case LUSTRE_Q_GETINFOPOOL:
3630 case LUSTRE_Q_SETDEFAULT_POOL:
3631 case LUSTRE_Q_GETDEFAULT_POOL:
3632 case LUSTRE_Q_DELETEQID:
3633 case LUSTRE_Q_RESETQID:
3634 /* forward quotactl request to QMT */
3635 rc = qmt_hdls.qmth_quotactl(tsi->tsi_env, qmt, nodemap, oqctl,
3639 case LUSTRE_Q_ITEROQUOTA:
3640 rc = lquota_iter_change_qid(nodemap, oqctl);
3642 GOTO(out_nodemap, rc);
3646 /* slave quotactl */
3647 rc = lquotactl_slv(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
3648 nodemap, oqctl, buffer);
3652 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
3653 GOTO(out_nodemap, rc = -EFAULT);
3656 if (oqctl->qc_id != id && qmt_need_swap(oqctl->qc_cmd))
3657 swap(oqctl->qc_id, id);
3659 QCTL_COPY_NO_PNAME(repoqc, oqctl);
3663 nodemap_putref(nodemap);
3668 /** clone llog ctxt from child (mdd)
3669 * This allows remote llog (replicator) access.
3670 * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child
3671 * where the context was originally set up, or we can handle them directly.
3672 * I choose the latter, but that means I need any llog
3673 * contexts set up by the child to be accessible by the mdt. So we clone the
3674 * context into our context list here.
3676 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
3679 struct md_device *next = mdt->mdt_child;
3680 struct llog_ctxt *ctxt;
3683 if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
3686 rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
3687 if (rc || ctxt == NULL)
3690 rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
3692 CERROR("Can't set mdt ctxt %d\n", rc);
3697 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
3698 struct mdt_device *mdt, int idx)
3700 struct llog_ctxt *ctxt;
3702 ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
3705 /* Put once for the get we just did, and once for the clone */
3706 llog_ctxt_put(ctxt);
3707 llog_ctxt_put(ctxt);
3712 * sec context handlers
3714 static int mdt_sec_ctx_handle(struct tgt_session_info *tsi)
3716 CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
3722 * quota request handlers
3724 static int mdt_quota_dqacq(struct tgt_session_info *tsi)
3726 struct mdt_device *mdt = mdt_exp2dev(tsi->tsi_exp);
3727 struct lu_device *qmt = mdt->mdt_qmt_dev;
3733 RETURN(err_serious(-EOPNOTSUPP));
3735 rc = qmt_hdls.qmth_dqacq(tsi->tsi_env, qmt, tgt_ses_req(tsi));
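/* Allocate a new MDT object for @f; LOC_F_NEW avoids a cache lookup since
 * the FID is known not to exist yet.
 */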
3739 struct mdt_object *mdt_object_new(const struct lu_env *env,
3740 struct mdt_device *d,
3741 const struct lu_fid *f)
3743 struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
3744 struct lu_object *o;
3745 struct mdt_object *m;
3749 CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
3750 o = lu_object_find(env, &d->mdt_lu_dev, f, &conf);
3751 if (unlikely(IS_ERR(o)))
3752 m = (struct mdt_object *)o;
3758 struct mdt_object *mdt_object_find(const struct lu_env *env,
3759 struct mdt_device *d,
3760 const struct lu_fid *f)
3762 struct lu_object *o;
3763 struct mdt_object *m;
3766 /* mdt_orphan_open() gets local ROOT */
3767 if (!fid_is_namespace_visible(f) && !fid_is_local_file(f)) {
3768 CERROR("%s: MDT object FID "DFID" is corrupt: rc = %d\n",
3769 mdt_obd_name(d), PFID(f), -EINVAL);
3770 RETURN(ERR_PTR(-EINVAL));
3773 CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
3774 o = lu_object_find(env, &d->mdt_lu_dev, f, NULL);
3775 if (unlikely(IS_ERR(o)))
3776 m = (struct mdt_object *)o;
3784 * Asynchronous commit for mdt device.
3786 * Pass the asynchronous commit call down the MDS stack.
3788 * \param env environment
3789 * \param mdt the mdt device
3791 static void mdt_device_commit_async(const struct lu_env *env,
3792 struct mdt_device *mdt)
3794 struct dt_device *dt = mdt->mdt_bottom;
3799 rc = dt->dd_ops->dt_commit_async(env, dt);
3800 if (unlikely(rc != 0))
3801 CWARN("%s: async commit start failed: rc = %d\n",
3802 mdt_obd_name(mdt), rc);
3803 atomic_inc(&mdt->mdt_async_commit_count);
3808 * Mark the lock as "synchronous".
3810 * Mark the lock to defer transaction commit to the unlock time.
3812 * \param lock the lock to mark as "synchronous"
3814 * \see mdt_is_lock_sync
3815 * \see mdt_save_lock
3817 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
3819 lock->l_ast_data = (void *)1;
3823 * Check whether the lock is "synchronous" or not.
3825 * \param lock the lock to check
3826 * \retval 1 the lock is "synchronous"
3827 * \retval 0 the lock isn't "synchronous"
3829 * \see mdt_set_lock_sync
3830 * \see mdt_save_lock
3832 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
3834 return lock->l_ast_data != NULL;
3838 * Blocking AST for mdt locks.
3840 * Starts a transaction commit in case of a COS lock conflict, or
3841 * defers such a commit to mdt_save_lock().
3843 * \param lock the lock which blocks a request or cancelling lock
3844 * \param desc unused
3845 * \param data unused
3846 * \param flag indicates whether this is a cancelling or blocking callback
3848 * \see ldlm_blocking_ast_nocheck
3850 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
3851 void *data, int flag)
3853 struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
3854 struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
3855 struct ldlm_cb_set_arg *arg = data;
3856 bool commit_async = false;
3860 if (flag == LDLM_CB_CANCELING)
3863 lock_res_and_lock(lock);
3864 if (lock->l_blocking_ast != mdt_blocking_ast) {
3865 unlock_res_and_lock(lock);
3869 /* A blocking ast may be sent from ldlm_lock_decref_internal
3870 * when the last reference to a local lock was released, and
3871 * during a blocking event from ldlm_work_bl_ast_lock().
3872 * The 'data' parameter is l_ast_data in the first case and
3873 * the callback arguments in the second one. Distinguish them by that.
3875 if (data && data != lock->l_ast_data && arg->bl_desc) {
3876 if (lock->l_req_mode & (LCK_COS | LCK_TXN))
3877 commit_async = true;
3878 else if ((lock->l_req_mode & (LCK_PW | LCK_EX)) &&
3879 ((mdt_cos_is_enabled(mdt) &&
3880 !arg->bl_desc->bl_same_client) ||
3881 (mdt_slc_is_enabled(mdt) &&
3882 arg->bl_desc->bl_txn_dependent)))
3883 mdt_set_lock_sync(lock);
3886 rc = ldlm_blocking_ast_nocheck(lock);
3891 rc = lu_env_init(&env, LCT_LOCAL);
3892 if (unlikely(rc != 0))
3893 CWARN("%s: lu_env initialization failed, cannot start asynchronous commit: rc = %d\n",
3896 mdt_device_commit_async(&env, mdt);
3903 * Blocking AST for cross-MDT lock
3905 * Discard lock from uncommitted_slc_locks and cancel it.
3907 * \param lock the lock which blocks a request or cancelling lock
3908 * \param desc unused
3909 * \param data unused
3910 * \param flag indicates whether this is a cancelling or blocking callback
3911 * \retval 0 on success
3912 * \retval negative number on error
3914 int mdt_remote_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
3915 void *data, int flag)
3922 case LDLM_CB_BLOCKING: {
3923 struct lustre_handle lockh;
3925 ldlm_lock2handle(lock, &lockh);
3926 rc = ldlm_cli_cancel(&lockh,
3927 (lock->l_flags & LDLM_FL_ATOMIC_CB) ? 0 : LCF_ASYNC);
3929 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
3934 case LDLM_CB_CANCELING: {
3935 struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
3936 struct mdt_device *mdt =
3937 mdt_dev(obd->obd_lu_dev->ld_site->ls_top_dev);
3939 LDLM_DEBUG(lock, "Revoke remote lock");
3941 /* discard the slc lock here so that it can be cleaned up at any time,
3942 * especially by cleanup_resource()
3944 tgt_discard_slc_lock(&mdt->mdt_lut, lock);
3946 /* once we cache the lock, l_ast_data is set to the mdt_object */
3947 if (lock->l_ast_data != NULL) {
3948 struct mdt_object *mo = lock->l_ast_data;
3951 rc = lu_env_init(&env, LCT_MD_THREAD);
3952 if (unlikely(rc != 0)) {
3953 CWARN("%s: lu_env initialization failed, object %p "DFID" is leaked!: rc = %d\n",
3955 PFID(mdt_object_fid(mo)), rc);
3959 if (lock->l_policy_data.l_inodebits.bits &
3960 (MDS_INODELOCK_XATTR | MDS_INODELOCK_UPDATE)) {
3961 rc = mo_invalidate(&env, mdt_object_child(mo));
3962 mo->mot_cache_attr = 0;
3964 mdt_object_put(&env, mo);
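/* Check whether the lock was already granted while handling a resent
 * request: returns 1 if the lock still needs to be taken, 0 if the resent
 * lock is already held, or a negative errno if the resent handle is stale
 * or does not match @mo.
 */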
3976 int mdt_check_resent_lock(struct mdt_thread_info *info,
3977 struct mdt_object *mo,
3978 struct mdt_lock_handle *lhc)
3980 /* the lock might already have been taken in ldlm_handle_enqueue() */
3981 if (unlikely(lustre_handle_is_used(&lhc->mlh_reg_lh))) {
3982 struct ptlrpc_request *req = mdt_info_req(info);
3983 struct ldlm_lock *lock;
3985 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
3986 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
3988 /* Lock is pinned by ldlm_handle_enqueue0() as this is
3989 * a resend case; however, it could already have been destroyed
3990 * due to client eviction or a raced cancel RPC.
3992 LDLM_DEBUG_NOLOCK("Invalid lock handle %#llx",
3993 lhc->mlh_reg_lh.cookie);
3997 if (!fid_res_name_eq(mdt_object_fid(mo),
3998 &lock->l_resource->lr_name)) {
3999 CWARN("%s: although resent, still could not get the child lock:"
4001 info->mti_exp->exp_obd->obd_name,
4002 PFID(mdt_object_fid(mo)));
4003 ldlm_lock_put(lock);
4006 ldlm_lock_put(lock);
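/* Lock-created callback for remote locks: take a reference on the
 * mdt_object stored in l_ast_data so the object lives as long as the lock;
 * it is released in mdt_remote_blocking_ast().
 */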
4012 static void mdt_remote_object_lock_created_cb(struct ldlm_lock *lock)
4014 mdt_object_get(NULL, lock->l_ast_data);
4017 static int mdt_remote_object_lock_try(struct mdt_thread_info *mti,
4018 struct mdt_object *obj,
4019 struct lustre_handle *lh,
4020 enum ldlm_mode mode,
4021 union ldlm_policy_data *policy,
4022 struct ldlm_res_id *res_id,
4025 struct ldlm_enqueue_info *einfo = &mti->mti_remote_einfo;
4028 LASSERT(mdt_object_remote(obj));
4030 memset(einfo, 0, sizeof(*einfo));
4031 einfo->ei_type = LDLM_IBITS;
4032 einfo->ei_mode = mode;
4033 einfo->ei_cb_bl = mdt_remote_blocking_ast;
4034 einfo->ei_cb_cp = ldlm_completion_ast;
4035 einfo->ei_enq_slave = 0;
4036 einfo->ei_res_id = res_id;
4037 einfo->ei_req_slot = 1;
4040 * if we cache the lock, couple it with the mdt_object, so that the
4041 * object can be easily found in lock ASTs.
4043 einfo->ei_cbdata = obj;
4044 einfo->ei_cb_created = mdt_remote_object_lock_created_cb;
4047 rc = mo_object_lock(mti->mti_env, mdt_object_child(obj), lh, einfo,
4054 /* other components like LFSCK can use lockless access
4055 * and populate the cache, so we had better invalidate it
4057 if (policy->l_inodebits.bits &
4058 (MDS_INODELOCK_UPDATE | MDS_INODELOCK_XATTR))
4059 mo_invalidate(mti->mti_env, mdt_object_child(obj));
4065 * Helper function to take the PDO and hash lock.
4067 * If \a pdo_lock is false, don't take the PDO lock; this is the case in rename.
4069 int mdt_object_pdo_lock(struct mdt_thread_info *info, struct mdt_object *obj,
4070 struct mdt_lock_handle *lh, const struct lu_name *name,
4071 enum ldlm_mode mode, bool pdo_lock)
4073 struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
4074 union ldlm_policy_data *policy = &info->mti_policy;
4075 struct ldlm_res_id *res_id = &info->mti_res_id;
4076 /*
4077 * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it is never going to
4078 * be sent to the client and we do not want it slowed down due to possible
4079 * cancels.
4080 */
4081 __u64 dlmflags = LDLM_FL_ATOMIC_CB;
4082 __u64 *cookie = NULL;
4087 /* check for existence after the object is locked */
4088 if (!mdt_object_exists(obj))
4089 /* Non-existent object shouldn't have PDO lock */
4092 /* Non-dir object shouldn't have PDO lock */
4093 if (!S_ISDIR(lu_object_attr(&obj->mot_obj)))
4096 policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
4097 policy->l_inodebits.try_bits = MDS_INODELOCK_NONE;
4098 policy->l_inodebits.li_gid = 0;
4099 policy->l_inodebits.li_initiator_id = mdt_node_id(info->mti_mdt);
4100 fid_build_reg_res_name(mdt_object_fid(obj), res_id);
4102 cookie = &info->mti_exp->exp_handle.h_cookie;
4104 mdt_lock_pdo_init(lh, mode, name);
4105 mdt_lock_pdo_mode(info, obj, lh);
4106 if (lh->mlh_pdo_mode != LCK_NL) {
4108 if (mdt_object_remote(obj)) {
4109 rc = mdt_remote_object_lock_try(info, obj,
4110 &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
4111 policy, res_id, false);
4112 lh->mlh_pdo_remote = 1;
4114 rc = mdt_fid_lock(info->mti_env, ns,
4115 &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
4116 policy, res_id, dlmflags, cookie);
4119 mdt_object_unlock(info, obj, lh, 1);
4123 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
4126 if (mdt_object_remote(obj))
4127 rc = mdt_remote_object_lock_try(info, obj, &lh->mlh_rreg_lh,
4128 lh->mlh_rreg_mode, policy, res_id, false);
4130 rc = mdt_fid_lock(info->mti_env, ns, &lh->mlh_reg_lh,
4131 lh->mlh_reg_mode, policy, res_id, dlmflags, cookie);
4133 mdt_object_unlock(info, obj, lh, 1);
4134 else if (CFS_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK) &&
4135 lh->mlh_pdo_hash != 0 &&
4136 (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX))
4137 CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
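/* Common helper for the locking variants below: build the inodebits policy
 * and enqueue the lock remotely or locally depending on where @obj resides;
 * on success *ibits is updated with the bits actually granted.
 */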
4142 int mdt_object_lock_internal(struct mdt_thread_info *info,
4143 struct mdt_object *obj, const struct lu_fid *fid,
4144 struct mdt_lock_handle *lh,
4145 enum mds_ibits_locks *ibits,
4146 enum mds_ibits_locks trybits,
4149 union ldlm_policy_data *policy = &info->mti_policy;
4150 struct ldlm_res_id *res_id = &info->mti_res_id;
4151 struct lustre_handle *handle;
4154 /* DoM lock shouldn't be combined with other ibits when it is
4155 * taken locally, due to a potential conflict with a GROUP lock.
4156 * Consider a trylock either for the DoM bit or for the others
4158 LASSERT(!(*ibits & MDS_INODELOCK_DOM && *ibits & ~MDS_INODELOCK_DOM));
4160 policy->l_inodebits.bits = *ibits;
4161 policy->l_inodebits.try_bits = trybits;
4162 policy->l_inodebits.li_gid = lh->mlh_gid;
4163 policy->l_inodebits.li_initiator_id = mdt_node_id(info->mti_mdt);
4164 fid_build_reg_res_name(fid, res_id);
4166 if (obj && mdt_object_remote(obj)) {
4167 handle = &lh->mlh_rreg_lh;
4168 LASSERT(!lustre_handle_is_used(handle));
4169 LASSERT(lh->mlh_rreg_mode != LCK_MODE_MIN);
4170 LASSERT(lh->mlh_type != MDT_NUL_LOCK);
4171 rc = mdt_remote_object_lock_try(info, obj, handle,
4172 lh->mlh_rreg_mode, policy,
4175 struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
4176 /*
4177 * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if
4178 * it is going to be sent to the client. If it is, the
4179 * mdt_intent_policy() path will fix it up and turn the FL_LOCAL
4180 * flag off.
4181 */
4182 __u64 dlmflags = LDLM_FL_ATOMIC_CB | LDLM_FL_LOCAL_ONLY;
4183 __u64 *cookie = NULL;
4185 handle = &lh->mlh_reg_lh;
4186 LASSERT(!lustre_handle_is_used(handle));
4187 LASSERT(lh->mlh_reg_mode != LCK_MODE_MIN);
4188 LASSERT(lh->mlh_type != MDT_NUL_LOCK);
4190 /* Lease locks are granted with LDLM_FL_CANCEL_ON_BLOCK */
4191 if (lh->mlh_type == MDT_REG_LOCK &&
4192 lh->mlh_reg_mode == LCK_EX && *ibits == MDS_INODELOCK_OPEN)
4193 dlmflags |= LDLM_FL_CANCEL_ON_BLOCK;
4197 cookie = &info->mti_exp->exp_handle.h_cookie;
4199 rc = mdt_fid_lock(info->mti_env, ns, handle, lh->mlh_reg_mode,
4200 policy, res_id, dlmflags, cookie);
4202 mdt_object_unlock(info, obj, lh, 1);
4206 struct ldlm_lock *lock;
4208 /* Return the successfully acquired bits to the caller */
4209 lock = ldlm_handle2lock(handle);
4211 *ibits = lock->l_policy_data.l_inodebits.bits;
4212 ldlm_lock_put(lock);
4219 * MDT object locking functions:
4220 * mdt_object_lock(): lock an object; this is used in most places, and normally
4221 * the lock ibits don't contain LOOKUP, unless the caller knows it's not
4222 * a remote object.
4223 * mdt_object_check_lock(): lock an object with LOOKUP and other ibits; it needs
4224 * to check whether the parent is on a remote MDT, and if so, take LOOKUP on
4225 * the parent MDT separately, and then lock the other ibits on the child object.
4226 * mdt_parent_lock(): take the parent UPDATE lock with a specific mode; if the
4227 * parent is local, take a PDO lock by name hash, otherwise take a regular lock.
4228 * mdt_object_stripes_lock(): lock an object which should be local, and if it's
4229 * a striped directory, lock its stripes; this is called in operations which
4230 * modify both the object and its stripes.
4231 * mdt_object_lock_try(): lock an object with trybits; the trybits contain
4232 * optional inode lock bits that can be granted. This is called by
4233 * getattr/open to fetch more inode lock bits for the client, and is also
4234 * called by dir migration to lock a link parent in non-block mode to avoid
4235 * deadlock.
4236 */
4238 /**
4239 * lock object
4240 *
4241 * this is used to lock object in most places, and normally lock ibits doesn't
4242 * contain LOOKUP, unless the caller knows it's not a remote object.
4244 * \param info struct mdt_thread_info
4246 * \param lh lock handle
4247 * \param ibits MDS inode lock bits
4248 * \param mode lock mode
4250 * \retval 0 on success, -ev on error.
4252 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *obj,
4253 struct mdt_lock_handle *lh, enum mds_ibits_locks ibits,
4254 enum ldlm_mode mode)
4259 mdt_lock_reg_init(lh, mode);
4260 rc = mdt_object_lock_internal(info, obj, mdt_object_fid(obj), lh,
4266 * lock object with LOOKUP and other ibits
4268 * it will check whether parent and child are on different MDTs; if so, take
4269 * the LOOKUP lock on the parent MDT, and lock the other ibits on the child
4270 * MDT; otherwise lock all ibits on the child MDT. Note that parent and child
4271 * shouldn't both be on remote MDTs; in that case a specific lock function
4272 * should be used, and that happens in rename and migrate only.
4274 * \param info struct mdt_thread_info
4275 * \param parent parent object
4276 * \param child child object
4277 * \param lh lock handle
4278 * \param ibits MDS inode lock bits
4279 * \param mode lock mode
4281 * \retval 0 on success, -ev on error.
4283 int mdt_object_check_lock(struct mdt_thread_info *info,
4284 struct mdt_object *parent, struct mdt_object *child,
4285 struct mdt_lock_handle *lh,
4286 enum mds_ibits_locks ibits, enum ldlm_mode mode)
4291 /* if LOOKUP ibit is not set, use mdt_object_lock() */
4292 LASSERT(ibits & MDS_INODELOCK_LOOKUP);
4293 /* if only LOOKUP ibit is needed, use mdt_object_lookup_lock() */
4294 LASSERT(ibits != MDS_INODELOCK_LOOKUP);
4296 /* @parent and @child shouldn't both be on remote MDTs */
4297 LASSERT(!(mdt_object_remote(parent) && mdt_object_remote(child)));
4299 mdt_lock_reg_init(lh, mode);
4300 if (mdt_object_remote(parent) ^ mdt_object_remote(child)) {
4301 enum mds_ibits_locks lookup_ibits = MDS_INODELOCK_LOOKUP;
4303 rc = mdt_object_lock_internal(info, parent,
4304 mdt_object_fid(child), lh,
4305 &lookup_ibits, 0, false);
4309 ibits &= ~MDS_INODELOCK_LOOKUP;
4312 rc = mdt_object_lock_internal(info, child, mdt_object_fid(child), lh,
4314 if (rc && !(ibits & MDS_INODELOCK_LOOKUP))
4315 mdt_object_unlock(info, NULL, lh, 1);
4321 * take parent UPDATE lock
4323 * if parent is local or mode is LCK_PW, take a PDO lock, otherwise take a
4324 * regular lock.
4326 * \param info struct mdt_thread_info
4327 * \param obj parent object
4328 * \param lh lock handle
4329 * \param lname child name
4330 * \param mode lock mode
4332 * \retval 0 on success, -ev on error.
4334 int mdt_parent_lock(struct mdt_thread_info *info, struct mdt_object *obj,
4335 struct mdt_lock_handle *lh, const struct lu_name *lname,
4336 enum ldlm_mode mode)
4341 LASSERT(obj && lname);
4342 LASSERT(mode == LCK_PW || mode == LCK_PR);
4343 if (mdt_object_remote(obj) && mode == LCK_PR) {
4344 enum mds_ibits_locks ibits = MDS_INODELOCK_UPDATE;
4346 mdt_lock_reg_init(lh, mode);
4347 rc = mdt_object_lock_internal(info, obj, mdt_object_fid(obj),
4348 lh, &ibits, 0, false);
4350 rc = mdt_object_pdo_lock(info, obj, lh, lname, mode, true);
4356 * lock object with trybits
4358 * the trybits contain optional inode lock bits that can be granted. This is
4359 * called by getattr/open to fetch more inode lock bits for the client, and is
4360 * also called by dir migration to lock a link parent in non-block mode to
4361 * avoid deadlock.
4363 * \param info struct mdt_thread_info
4365 * \param lh lock handle
4366 * \param ibits MDS inode lock bits
4367 * \param trybits optional inode lock bits
4368 * \param mode lock mode
4370 * \retval 0 on success, -ev on error.
4372 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *obj,
4373 struct mdt_lock_handle *lh, enum mds_ibits_locks *ibits,
4374 enum mds_ibits_locks trybits, enum ldlm_mode mode)
4376 bool trylock_only = *ibits == 0;
4380 LASSERT(!(*ibits & trybits));
4381 mdt_lock_reg_init(lh, mode);
4382 rc = mdt_object_lock_internal(info, obj, mdt_object_fid(obj), lh, ibits,
4384 if (rc && trylock_only) { /* clear error for try ibits lock only */
4385 LASSERT(*ibits == 0);
4392 * Helper function to take \a obj LOOKUP lock.
4394 * Both \a pobj and \a obj may be located on remote MDTs.
4396 int mdt_object_lookup_lock(struct mdt_thread_info *info,
4397 struct mdt_object *pobj, struct mdt_object *obj,
4398 struct mdt_lock_handle *lh, enum ldlm_mode mode)
4400 enum mds_ibits_locks ibits = MDS_INODELOCK_LOOKUP;
4404 /* if @parent is NULL, it's on local MDT, and @child is remote,
4405 * this is the case in getattr/unlink/open by name.
4407 LASSERT(ergo(!pobj, mdt_object_remote(obj)));
4408 mdt_lock_reg_init(lh, mode);
4409 rc = mdt_object_lock_internal(info, pobj, mdt_object_fid(obj), lh,
4415 * Save a lock within the request object.
4417 * Keep the lock referenced until either client ACK or transaction
4418 * commit happens, or release the lock immediately, depending on input
4419 * parameters. If COS is ON, a write lock is converted to a COS lock
4420 * before saving.
4422 * \param info thread info object
4423 * \param h lock handle
4424 * \param mode lock mode
4425 * \param decref force immediate lock releasing
4427 static void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
4428 enum ldlm_mode mode, int decref)
4430 struct tgt_session_info *tsi = info->mti_env->le_ses ?
4431 tgt_ses_info(info->mti_env) : NULL;
4435 if (lustre_handle_is_used(h)) {
4436 bool has_trans = tsi && tsi->tsi_has_trans;
4438 if (decref || !has_trans || !(mode & (LCK_PW | LCK_EX))) {
4439 mdt_fid_unlock(h, mode);
4441 struct mdt_device *mdt = info->mti_mdt;
4442 struct ldlm_lock *lock = ldlm_handle2lock(h);
4443 struct ptlrpc_request *req = mdt_info_req(info);
4444 bool no_ack = false;
4446 LASSERTF(lock != NULL, "no lock for cookie %#llx\n",
4449 /* there is no request if mdt_object_unlock() is called
4450 * from mdt_export_cleanup()->mdt_add_dirty_flag()
4452 if (likely(req != NULL)) {
4453 LDLM_DEBUG(lock, "save lock request %p reply state %p transno %lld",
4455 req->rq_reply_state, req->rq_transno);
4456 if (mdt_cos_is_enabled(mdt)) {
4459 ldlm_lock_mode_downgrade(lock, mode);
4460 } else if (mdt_slc_is_enabled(mdt)) {
4462 if (mode != LCK_TXN) {
4464 ldlm_lock_mode_downgrade(lock,
4468 if (req->rq_export->exp_disconnected)
4469 mdt_fid_unlock(h, mode);
4471 ptlrpc_save_lock(req, h, no_ack);
4473 mdt_fid_unlock(h, mode);
4476 if (mdt_is_lock_sync(lock)) {
4477 CDEBUG(D_HA, "sync_lock, do async commit\n");
4478 mdt_device_commit_async(info->mti_env, mdt);
4480 ldlm_lock_put(lock);
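/*
 * Editorial summary of the branches above: the lock is dropped at once
 * when decref is set, there is no transaction, or the mode is not
 * PW/EX; otherwise a write lock is downgraded to LCK_COS (COS) or
 * LCK_TXN (SLC) and parked via ptlrpc_save_lock() until the client
 * ACK or the transaction commit releases it.
 */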
4489 * Save cross-MDT lock in uncommitted_slc_locks
4491 * Keep the lock referenced until transaction commit happens, or release the
4492 * lock immediately, depending on input parameters.
4494 * \param info thread info object
4495 * \param h lock handle
4496 * \param mode lock mode
4497 * \param decref force immediate lock release
4499 static void mdt_save_remote_lock(struct mdt_thread_info *info,
4500 struct mdt_object *o, struct lustre_handle *h,
4501 enum ldlm_mode mode, int decref)
4505 if (lustre_handle_is_used(h)) {
4506 struct ldlm_lock *lock = ldlm_handle2lock(h);
4507 struct ptlrpc_request *req = mdt_info_req(info);
4510 (lock->l_policy_data.l_inodebits.bits &
4511 (MDS_INODELOCK_XATTR | MDS_INODELOCK_UPDATE)))
4512 mo_invalidate(info->mti_env, mdt_object_child(o));
4514 if (decref || !req || !(mode & (LCK_PW | LCK_EX)) ||
4515 !tgt_ses_info(info->mti_env)->tsi_has_trans) {
4516 ldlm_lock_decref_and_cancel(h, mode);
4517 ldlm_lock_put(lock);
4519 tgt_save_slc_lock(&info->mti_mdt->mdt_lut, lock,
4521 ldlm_lock_decref(h, mode);
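/*
 * Editorial summary: a cross-MDT lock is either cancelled on the spot
 * (decref set, no request or transaction, or a non-write mode) or
 * handed to tgt_save_slc_lock() so that it stays on
 * uncommitted_slc_locks until the transaction commits; XATTR/UPDATE
 * bits additionally trigger mo_invalidate() on the local object.
 */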
4530 * Unlock mdt object.
4532 * Immediately release the regular lock and the PDO lock, or save the
4533 * locks in the request and keep them referenced until client ACK or
4534 * transaction commit.
4536 * \param info thread info object
4537 * \param o mdt object
4538 * \param lh mdt lock handle referencing regular and PDO locks
4539 * \param decref force immediate lock release
4541 * XXX o is not used and may be NULL, see hsm_cdt_request_completed().
4543 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
4544 struct mdt_lock_handle *lh, int decref)
4548 if (lh->mlh_pdo_remote)
4549 mdt_save_remote_lock(info, o, &lh->mlh_pdo_lh,
4550 lh->mlh_pdo_mode, decref);
4552 mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
4553 mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
4554 mdt_save_remote_lock(info, o, &lh->mlh_rreg_lh, lh->mlh_rreg_mode,
4560 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
4561 const struct lu_fid *f,
4562 struct mdt_lock_handle *lh,
4563 enum mds_ibits_locks ibits,
4564 enum ldlm_mode mode)
4566 struct mdt_object *o;
4568 o = mdt_object_find(info->mti_env, info->mti_mdt, f);
4572 rc = mdt_object_lock(info, o, lh, ibits, mode);
4574 mdt_object_put(info->mti_env, o);
4581 void mdt_object_unlock_put(struct mdt_thread_info *info,
4582 struct mdt_object *o,
4583 struct mdt_lock_handle *lh,
4586 mdt_object_unlock(info, o, lh, decref);
4587 mdt_object_put(info->mti_env, o);
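/*
 * Illustrative sketch (editor's addition): the usual pairing of
 * mdt_object_find_lock() with mdt_object_unlock_put(). The function
 * name and the choice of the MDT_LH_PARENT slot are hypothetical.
 */
static inline int __maybe_unused
example_find_lock_put(struct mdt_thread_info *info, const struct lu_fid *fid)
{
	struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
	struct mdt_object *o;

	o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE, LCK_PW);
	if (IS_ERR(o))
		return PTR_ERR(o);

	/* ... operate on @o under the UPDATE lock ... */

	/* decref == 1: drop the lock now instead of saving it */
	mdt_object_unlock_put(info, o, lh, 1);
	return 0;
}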
4591 * Generic code handling requests that have struct mdt_body passed in:
4593 * - extract mdt_body from request and save it in @info, if present;
4595 * - create lu_object, corresponding to the fid in mdt_body, and save it in
4598 * - if the HAS_BODY flag is set for this request type, check whether the
4599 * object actually exists on storage (lu_object_exists()).
4602 static int mdt_body_unpack(struct mdt_thread_info *info,
4603 enum tgt_handler_flags flags)
4605 const struct mdt_body *body;
4606 struct mdt_object *obj;
4607 const struct lu_env *env;
4608 struct req_capsule *pill;
4613 env = info->mti_env;
4614 pill = info->mti_pill;
4616 body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
4620 if (!(body->mbo_valid & OBD_MD_FLID))
4623 if (!fid_is_sane(&body->mbo_fid1)) {
4624 CERROR("Invalid fid: "DFID"\n", PFID(&body->mbo_fid1));
4628 obj = mdt_object_find(env, info->mti_mdt, &body->mbo_fid1);
4630 if ((flags & HAS_BODY) && !mdt_object_exists(obj)) {
4631 mdt_object_put(env, obj);
4634 info->mti_object = obj;
4643 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info,
4644 enum tgt_handler_flags flags)
4646 struct req_capsule *pill = info->mti_pill;
4651 if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
4652 rc = mdt_body_unpack(info, flags);
4656 if (rc == 0 && (flags & HAS_REPLY)) {
4658 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
4659 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
4660 req_capsule_ptlreq(pill) ?
4661 DEF_REP_MD_SIZE : MAX_MD_SIZE_OLD);
4663 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
4664 req_capsule_set_size(pill, &RMF_LOGCOOKIES,
4667 /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD
4668 * by default. If the target object has more ACL entries, then
4669 * enlarge the buffer when necessary.
4671 if (req_capsule_has_field(pill, &RMF_ACL, RCL_SERVER))
4672 req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER,
4673 LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
4675 mdt_preset_secctx_size(info);
4676 mdt_preset_encctx_size(info);
4678 rc = req_capsule_server_pack(pill);
4680 CWARN("%s: cannot pack response: rc = %d\n",
4681 mdt_obd_name(info->mti_mdt), rc);
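/*
 * Editorial summary: for HAS_REPLY requests the reply buffers (MD,
 * llog cookies, ACL, plus the security and encryption contexts) are
 * pre-sized to modest defaults before req_capsule_server_pack();
 * e.g. the ACL buffer starts at LUSTRE_POSIX_ACL_MAX_SIZE_OLD and is
 * enlarged later only when the object really has more ACL entries.
 */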
4686 void mdt_thread_info_reset(struct mdt_thread_info *info)
4688 memset(&info->mti_attr, 0, sizeof(info->mti_attr));
4689 info->mti_body = NULL;
4690 info->mti_dlm_req = NULL;
4691 info->mti_cross_ref = 0;
4692 info->mti_opdata = 0;
4693 info->mti_big_lov_used = 0;
4694 info->mti_big_lmv_used = 0;
4695 info->mti_big_acl_used = 0;
4696 info->mti_som_strict = 0;
4697 info->mti_intent_lock = 0;
4699 info->mti_spec.no_create = 0;
4700 info->mti_spec.sp_rm_entry = 0;
4701 info->mti_spec.sp_permitted = 0;
4703 info->mti_spec.u.sp_ea.eadata = NULL;
4704 info->mti_spec.u.sp_ea.eadatalen = 0;
4706 if (info->mti_batch_env && info->mti_object != NULL) {
4707 mdt_object_put(info->mti_env, info->mti_object);
4708 info->mti_object = NULL;
4713 * Initialize fields of struct mdt_thread_info. Other fields are left in
4714 * uninitialized state, because it's too expensive to zero out the whole
4715 * mdt_thread_info (> 1K) on each request arrival.
4717 void mdt_thread_info_init(struct ptlrpc_request *req,
4718 struct mdt_thread_info *info)
4720 info->mti_pill = &req->rq_pill;
4722 /* mdt device: it can be NULL while CONNECT */
4723 if (req->rq_export) {
4724 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
4725 info->mti_exp = req->rq_export;
4727 info->mti_mdt = NULL;
4728 info->mti_env = req->rq_svc_thread->t_env;
4729 info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
4730 info->mti_big_buf = LU_BUF_NULL;
4731 info->mti_batch_env = 0;
4732 info->mti_object = NULL;
4734 mdt_thread_info_reset(info);
4737 void mdt_thread_info_fini(struct mdt_thread_info *info)
4741 if (info->mti_object != NULL) {
4742 mdt_object_put(info->mti_env, info->mti_object);
4743 info->mti_object = NULL;
4746 for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
4747 mdt_lock_handle_assert(&info->mti_lh[i]);
4748 info->mti_env = NULL;
4749 info->mti_pill = NULL;
4750 info->mti_exp = NULL;
4751 info->mti_mdt = NULL;
4753 if (unlikely(info->mti_big_buf.lb_buf != NULL))
4754 lu_buf_free(&info->mti_big_buf);
4757 struct mdt_thread_info *tsi2mdt_info(struct tgt_session_info *tsi)
4759 struct mdt_thread_info *mti;
4761 mti = mdt_th_info(tsi->tsi_env);
4762 LASSERT(mti != NULL);
4764 mdt_thread_info_init(tgt_ses_req(tsi), mti);
4765 if (tsi->tsi_corpus != NULL) {
4766 mti->mti_object = mdt_obj(tsi->tsi_corpus);
4767 lu_object_get(tsi->tsi_corpus);
4769 mti->mti_body = tsi->tsi_mdt_body;
4770 mti->mti_dlm_req = tsi->tsi_dlm_req;
4775 static int mdt_tgt_connect(struct tgt_session_info *tsi)
4777 if (CFS_FAIL_CHECK(OBD_FAIL_TGT_DELAY_CONDITIONAL) &&
4779 tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id)
4780 schedule_timeout_uninterruptible(cfs_time_seconds(3));
4782 return tgt_connect(tsi);
4785 static int mdt_intent_glimpse(enum ldlm_intent_flags it_opc,
4786 struct mdt_thread_info *info,
4787 struct ldlm_lock **lockp, __u64 flags)
4789 return mdt_glimpse_enqueue(info, info->mti_mdt->mdt_namespace,
4792 static int mdt_intent_brw(enum ldlm_intent_flags it_opc,
4793 struct mdt_thread_info *info,
4794 struct ldlm_lock **lockp, __u64 flags)
4796 return mdt_brw_enqueue(info, info->mti_mdt->mdt_namespace,
4800 int mdt_intent_lock_replace(struct mdt_thread_info *info,
4801 struct ldlm_lock **lockp,
4802 struct mdt_lock_handle *lh,
4803 __u64 flags, int result)
4805 struct ptlrpc_request *req = mdt_info_req(info);
4806 struct ldlm_lock *lock = *lockp;
4807 struct ldlm_lock *new_lock;
4809 /* If a resend found an existing lock, @lh is set to its handle */
4810 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
4812 if (new_lock == NULL) {
4813 if (flags & LDLM_FL_INTENT_ONLY) {
4815 } else if (flags & LDLM_FL_RESENT) {
4816 /* Lock is pinned by ldlm_handle_enqueue0() as it is a
4817 * resend case; however, it could already be destroyed
4818 * due to client eviction or a raced cancel RPC.
4820 LDLM_DEBUG_NOLOCK("Invalid lock handle %#llx\n",
4821 lh->mlh_reg_lh.cookie);
4824 CERROR("%s: Invalid lockh=%#llx flags=%#llx fid1="DFID" fid2="DFID": rc = %d\n",
4825 mdt_obd_name(info->mti_mdt),
4826 lh->mlh_reg_lh.cookie, flags,
4827 PFID(&info->mti_tmp_fid1),
4828 PFID(&info->mti_tmp_fid2), result);
4831 lh->mlh_reg_lh.cookie = 0;
4836 * If we've already given this lock to a client once, then we should
4837 * have no readers or writers. Otherwise, we should have one reader
4838 * _or_ writer ref (which will be zeroed below) before returning the lock to a client.
4841 if (new_lock->l_export == req->rq_export) {
4842 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
4844 LASSERT(new_lock->l_export == NULL);
4845 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
4850 if (new_lock->l_export == req->rq_export) {
4852 * Already gave this to the client, which means that we
4853 * reconstructed a reply.
4855 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
4858 ldlm_lock_put(new_lock);
4859 lh->mlh_reg_lh.cookie = 0;
4860 RETURN(ELDLM_LOCK_REPLACED);
4864 * Fixup the lock to be given to the client.
4866 lock_res_and_lock(new_lock);
4867 /* Zero new_lock->l_readers and new_lock->l_writers without triggering
4868 * possible blocking AST.
4870 while (new_lock->l_readers > 0) {
4871 new_lock->l_readers--;
4873 while (new_lock->l_writers > 0) {
4874 new_lock->l_writers--;
4877 new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
4878 new_lock->l_blocking_ast = lock->l_blocking_ast;
4879 new_lock->l_completion_ast = lock->l_completion_ast;
4880 if (ldlm_has_dom(new_lock))
4881 new_lock->l_glimpse_ast = ldlm_server_glimpse_ast;
4882 new_lock->l_remote_handle = lock->l_remote_handle;
4883 new_lock->l_flags &= ~LDLM_FL_LOCAL;
4885 unlock_res_and_lock(new_lock);
4887 cfs_hash_add(new_lock->l_export->exp_lock_hash,
4888 &new_lock->l_remote_handle,
4889 &new_lock->l_exp_hash);
4891 ldlm_lock_put(new_lock);
4892 lh->mlh_reg_lh.cookie = 0;
4894 RETURN(ELDLM_LOCK_REPLACED);
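/*
 * Editorial summary: on ELDLM_LOCK_REPLACED the pre-existing lock has
 * had its reader/writer refs cleared, has inherited the export, the
 * blocking/completion ASTs and the remote handle of the intent lock,
 * and has been hashed into exp_lock_hash, so the client handle now
 * resolves to it.
 */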
4897 void mdt_intent_fixup_resent(struct mdt_thread_info *info,
4898 struct ldlm_lock *new_lock,
4899 struct mdt_lock_handle *lh, __u64 flags)
4901 struct ptlrpc_request *req = mdt_info_req(info);
4902 struct ldlm_request *dlmreq;
4904 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
4907 dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
4909 /* if this is a resend case (MSG_RESENT is set on the RPC) and a lock was
4910 * found by ldlm_handle_enqueue(), then @lh must be initialized from it.
4912 if (flags & LDLM_FL_RESENT) {
4913 lh->mlh_reg_lh.cookie = new_lock->l_handle.h_cookie;
4914 lh->mlh_reg_mode = new_lock->l_granted_mode;
4916 LDLM_DEBUG(new_lock, "Restoring lock cookie");
4917 DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie %#llx",
4918 lh->mlh_reg_lh.cookie);
4923 * If the xid matches, then we know this is a resent request, and allow
4924 * it. (It's probably an OPEN, for which we don't send a lock.)
4926 if (req_can_reconstruct(req, NULL) != 0)
4930 * This remote handle isn't enqueued, so we never received or processed
4931 * this request. Clear MSG_RESENT, because it can be handled like any
4932 * normal request now.
4934 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
4936 DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle %#llx",
4937 dlmreq->lock_handle[0].cookie);
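/*
 * Editorial summary: three outcomes are possible above: (1) with
 * LDLM_FL_RESENT the granted lock's cookie and mode are restored into
 * @lh; (2) a reconstructible xid means the reply will be rebuilt
 * (e.g. an OPEN for which no lock was sent); (3) otherwise MSG_RESENT
 * is cleared and the request is handled as a fresh one.
 */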
4940 static int mdt_intent_getxattr(enum ldlm_intent_flags it_opc,
4941 struct mdt_thread_info *info,
4942 struct ldlm_lock **lockp,
4945 struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
4946 struct ldlm_reply *ldlm_rep = NULL;
4952 * Initialize lhc->mlh_reg_lh either from a previously granted lock
4953 * (for the resend case) or a new lock. Below we will use it to
4954 * replace the original lock.
4956 mdt_intent_fixup_resent(info, *lockp, lhc, flags);
4957 if (!lustre_handle_is_used(&lhc->mlh_reg_lh)) {
4958 rc = mdt_object_lock(info, info->mti_object, lhc,
4959 MDS_INODELOCK_XATTR, (*lockp)->l_req_mode);
4964 rc = mdt_getxattr(info);
4966 if (mdt_info_req(info)->rq_repmsg != NULL)
4967 ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
4969 if (ldlm_rep == NULL ||
4970 CFS_FAIL_CHECK(OBD_FAIL_MDS_XATTR_REP)) {
4971 mdt_object_unlock(info, info->mti_object, lhc, 1);
4975 RETURN(err_serious(-EFAULT));
4978 ldlm_rep->lock_policy_res2 = clear_serious(rc);
4980 /* This is for interop instead of adding a new interop flag. LU-7433 */
4981 #if LUSTRE_VERSION_CODE > OBD_OCD_VERSION(3, 0, 0, 0)
4982 if (ldlm_rep->lock_policy_res2) {
4983 mdt_object_unlock(info, info->mti_object, lhc, 1);
4984 RETURN(ELDLM_LOCK_ABORTED);
4988 rc = mdt_intent_lock_replace(info, lockp, lhc, flags, rc);
4992 static int mdt_intent_getattr(enum ldlm_intent_flags it_opc,
4993 struct mdt_thread_info *info,
4994 struct ldlm_lock **lockp,
4997 struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
4999 struct ldlm_reply *ldlm_rep;
5000 struct mdt_body *reqbody;
5001 struct mdt_body *repbody;
5006 reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
5009 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
5012 info->mti_cross_ref = !!(reqbody->mbo_valid & OBD_MD_FLCROSSREF);
5013 repbody->mbo_eadatasize = 0;
5014 repbody->mbo_aclsize = 0;
5018 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
5021 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
5025 CERROR("%s: unsupported intent %#x\n",
5026 mdt_obd_name(info->mti_mdt), (unsigned int)it_opc);
5027 GOTO(out_shrink, rc = -EINVAL);
5030 rc = mdt_init_ucred(info, reqbody);
5032 GOTO(out_shrink, rc);
5034 ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
5035 mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
5037 /* Get lock from request for possible resent case. */
5038 mdt_intent_fixup_resent(info, *lockp, lhc, flags);
5040 rc = mdt_getattr_name_lock(info, lhc, child_bits, ldlm_rep);
5041 ldlm_rep->lock_policy_res2 = clear_serious(rc);
5043 if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG) &&
5044 ldlm_rep->lock_policy_res2 != -ENOKEY)
5045 ldlm_rep->lock_policy_res2 = 0;
5046 if (!mdt_get_disposition(ldlm_rep, DISP_LOOKUP_POS) ||
5047 ldlm_rep->lock_policy_res2) {
5048 lhc->mlh_reg_lh.cookie = 0ull;
5049 /* Return error code immediately to stop batched statahead. */
5050 GOTO(out_ucred, rc = info->mti_batch_env ? rc :
5051 ELDLM_LOCK_ABORTED);
5054 rc = mdt_intent_lock_replace(info, lockp, lhc, flags, rc);
5057 mdt_exit_ucred(info);
5059 mdt_client_compatibility(info);
5060 rc2 = mdt_fix_reply(info);
5066 static int mdt_layout_change_pccro(struct mdt_thread_info *info,
5067 struct mdt_object *obj,
5068 struct mdt_lock_handle *lhc,
5069 struct md_layout_change *layout)
5075 if (!mdt_object_exists(obj))
5078 if (!S_ISREG(lu_object_attr(&obj->mot_obj)))
5081 rc = mdt_object_lock(info, obj, lhc, MDS_INODELOCK_LAYOUT, LCK_CR);
5085 rc = mo_layout_check(info->mti_env,
5086 mdt_object_child(obj), layout);
5087 if (rc == -EALREADY)
5090 mdt_object_unlock(info, obj, lhc, 1);
5091 rc = mdt_layout_change(info, obj, lhc, layout);
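/*
 * Editorial note: the CR lock taken above is only a cheap first pass;
 * -EALREADY from mo_layout_check() means the file is already PCC-RO
 * and the CR lock can be returned as is, otherwise the CR lock is
 * dropped and mdt_layout_change() redoes the work under the full
 * layout lock.
 */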
5095 static int mdt_intent_layout(enum ldlm_intent_flags it_opc,
5096 struct mdt_thread_info *info,
5097 struct ldlm_lock **lockp,
5100 struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
5101 struct md_layout_change layout = { .mlc_opc = MD_LAYOUT_NOP };
5102 struct layout_intent *intent;
5103 struct ldlm_reply *ldlm_rep;
5104 struct lu_fid *fid = &info->mti_tmp_fid2;
5105 struct mdt_object *obj = NULL;
5106 int layout_size = 0;
5107 struct lu_buf *buf = &layout.mlc_buf;
5112 fid_extract_from_res_name(fid, &(*lockp)->l_resource->lr_name);
5114 intent = req_capsule_client_get(info->mti_pill, &RMF_LAYOUT_INTENT);
5118 CDEBUG(D_INFO, DFID "got layout change request from client: opc:%u flags:%#x extent "
5120 PFID(fid), intent->lai_opc, intent->lai_flags,
5121 PEXT(&intent->lai_extent));
5123 switch (intent->lai_opc) {
5124 case LAYOUT_INTENT_TRUNC:
5125 case LAYOUT_INTENT_WRITE:
5126 case LAYOUT_INTENT_PCCRO_SET:
5127 case LAYOUT_INTENT_PCCRO_CLEAR:
5128 layout.mlc_opc = MD_LAYOUT_WRITE;
5129 layout.mlc_intent = intent;
5131 case LAYOUT_INTENT_ACCESS:
5133 case LAYOUT_INTENT_READ:
5134 case LAYOUT_INTENT_GLIMPSE:
5135 case LAYOUT_INTENT_RELEASE:
5136 case LAYOUT_INTENT_RESTORE:
5137 CERROR("%s: Unsupported layout intent opc %d\n",
5138 mdt_obd_name(info->mti_mdt), intent->lai_opc);
5141 CERROR("%s: Unknown layout intent opc %d\n",
5142 mdt_obd_name(info->mti_mdt), intent->lai_opc);
5146 obj = mdt_object_find(info->mti_env, info->mti_mdt, fid);
5148 RETURN(PTR_ERR(obj));
5150 if (mdt_object_exists(obj) && !mdt_object_remote(obj)) {
5151 /* if the layout is going to be changed, don't use the current EA
5152 * size but the maximum one. That buffer will be shrunk
5153 * to the actual size in req_capsule_shrink() before reply.
5155 if (layout.mlc_opc == MD_LAYOUT_WRITE) {
5156 layout_size = info->mti_mdt->mdt_max_mdsize;
5158 layout_size = mdt_attr_get_eabuf_size(info, obj);
5159 if (layout_size < 0)
5160 GOTO(out, rc = layout_size);
5162 if (layout_size > info->mti_mdt->mdt_max_mdsize)
5163 info->mti_mdt->mdt_max_mdsize = layout_size;
5165 CDEBUG(D_INFO, "%s: layout_size %d\n",
5166 mdt_obd_name(info->mti_mdt), layout_size);
5170 * set reply buffer size, so that ldlm_handle_enqueue0()->
5171 * ldlm_lvbo_fill() will fill the reply buffer with lovea.
5173 req_capsule_set_size(info->mti_pill, &RMF_DLM_LVB, RCL_SERVER,
5175 rc = req_capsule_server_pack(info->mti_pill);
5179 ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
5181 GOTO(out, rc = -EPROTO);
5183 mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
5185 /* take lock in ldlm_lock_enqueue() for LAYOUT_INTENT_ACCESS */
5186 if (layout.mlc_opc == MD_LAYOUT_NOP)
5189 rc = mdt_check_resent(info, mdt_reconstruct_generic, lhc);
5193 DEBUG_REQ(D_INODE, mdt_info_req(info), "resent opt.");
5194 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
5200 if (unlikely(req_is_replay(mdt_info_req(info)))) {
5201 buf->lb_buf = req_capsule_client_get(info->mti_pill,
5203 buf->lb_len = req_capsule_get_size(info->mti_pill,
5204 &RMF_EADATA, RCL_CLIENT);
5206 * If it's a replay of a layout write intent RPC, the client
5207 * saved the extended lovea when it got the reply.
5209 if (buf->lb_len > 0)
5210 mdt_fix_lov_magic(info, buf->lb_buf);
5213 /* Get lock from request for possible resent case. */
5214 mdt_intent_fixup_resent(info, *lockp, lhc, flags);
5215 (*lockp)->l_lvb_type = LVB_T_LAYOUT;
5217 if (intent->lai_opc == LAYOUT_INTENT_PCCRO_SET)
5219 * Take a CR layout lock against the file object first to check
5220 * whether the file is already PCC-RO cached. If so, return
5221 * immediately; otherwise, take an EX layout lock on the file
5222 * to update the FLR PCC-RO state accordingly. This check
5223 * avoids heavy lock contention and unnecessary revocation
5224 * of the layout lock granted to other clients when multiple
5225 * processes from many clients perform read-only attach on a
5226 * shared file object simultaneously.
5228 rc = mdt_layout_change_pccro(info, obj, lhc, &layout);
5231 * Instantiate some layout components, if @buf contains lovea,
5232 * then it's a replay of the layout intent write RPC.
5234 rc = mdt_layout_change(info, obj, lhc, &layout);
5236 ldlm_rep->lock_policy_res2 = clear_serious(rc);
5238 if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
5239 rc = mdt_intent_lock_replace(info, lockp, lhc, flags, rc);
5240 if (rc == ELDLM_LOCK_REPLACED &&
5241 (*lockp)->l_granted_mode == LCK_EX)
5242 ldlm_lock_mode_downgrade(*lockp, LCK_CR);
5247 mdt_object_put(info->mti_env, obj);
5251 static int mdt_intent_open(enum ldlm_intent_flags it_opc,
5252 struct mdt_thread_info *info,
5253 struct ldlm_lock **lockp,
5256 struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
5257 struct ldlm_reply *rep = NULL;
5260 struct ptlrpc_request *req = mdt_info_req(info);
5262 static const struct req_format *intent_fmts[REINT_MAX] = {
5263 [REINT_CREATE] = &RQF_LDLM_INTENT_CREATE,
5264 [REINT_OPEN] = &RQF_LDLM_INTENT_OPEN
5269 opc = mdt_reint_opcode(mdt_info_req(info), intent_fmts);
5273 /* Get lock from request for possible resent case. */
5274 mdt_intent_fixup_resent(info, *lockp, lhc, flags);
5276 rc = mdt_reint_internal(info, lhc, opc);
5278 if (rc < 0 && lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
5279 DEBUG_REQ(D_ERROR, req, "Replay open failed with %d", rc);
5281 /* Check whether the reply has been packed successfully. */
5282 if (mdt_info_req(info)->rq_repmsg != NULL)
5283 rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
5288 RETURN(err_serious(-EFAULT));
5291 /* MDC expects this in any case */
5293 mdt_set_disposition(info, rep, DISP_LOOKUP_EXECD);
5295 /* the open lock or the lock for a cross-ref object should be
5296 * returned to the client
5298 if (lustre_handle_is_used(&lhc->mlh_reg_lh) &&
5299 (rc == 0 || rc == -MDT_EREMOTE_OPEN)) {
5300 rep->lock_policy_res2 = 0;
5301 rc = mdt_intent_lock_replace(info, lockp, lhc, flags, rc);
5305 rep->lock_policy_res2 = clear_serious(rc);
5307 if (rep->lock_policy_res2 == -ENOENT &&
5308 mdt_get_disposition(rep, DISP_LOOKUP_NEG) &&
5309 !mdt_get_disposition(rep, DISP_OPEN_CREATE))
5310 rep->lock_policy_res2 = 0;
5312 lhc->mlh_reg_lh.cookie = 0ull;
5313 if (rc == -ENOTCONN || rc == -ENODEV ||
5314 rc == -EOVERFLOW) { /**< if VBR failure then return error */
5316 * If it is a disconnect error (ENODEV & ENOTCONN), the error
5317 * will be returned by rq_status, and the client at the ptlrpc
5318 * layer will detect this, then disconnect and reconnect the
5319 * import immediately, instead of impacting the following RPCs.
5324 * For other cases, the error will be returned via the intent, and the
5325 * client will retrieve the result from the intent.
5327 RETURN(ELDLM_LOCK_ABORTED);
5330 static int mdt_intent_opc(enum ldlm_intent_flags it_opc,
5331 struct mdt_thread_info *info,
5332 struct ldlm_lock **lockp,
5333 u64 flags /* LDLM_FL_* */)
5335 struct req_capsule *pill = info->mti_pill;
5336 struct ptlrpc_request *req = mdt_info_req(info);
5337 const struct req_format *it_format;
5338 int (*it_handler)(enum ldlm_intent_flags,
5339 struct mdt_thread_info *,
5340 struct ldlm_lock **,
5342 enum tgt_handler_flags it_handler_flags = 0;
5343 struct ldlm_reply *rep;
5344 bool check_mdt_object = false;
5352 case IT_OPEN|IT_CREAT:
5354 * OCREAT is not an IS_MUTABLE request since the file may
5355 * already exist. We do the extra check of
5356 * OBD_CONNECT_RDONLY in mdt_reint_open() when we
5357 * really need to create the object.
5359 it_format = &RQF_LDLM_INTENT;
5360 it_handler = &mdt_intent_open;
5363 check_mdt_object = true;
5366 it_format = &RQF_LDLM_INTENT_GETATTR;
5367 it_handler = &mdt_intent_getattr;
5368 it_handler_flags = HAS_REPLY;
5371 check_mdt_object = true;
5372 it_format = &RQF_LDLM_INTENT_GETXATTR;
5373 it_handler = &mdt_intent_getxattr;
5374 it_handler_flags = HAS_BODY;
5377 it_format = &RQF_LDLM_INTENT_LAYOUT;
5378 it_handler = &mdt_intent_layout;
5381 it_format = &RQF_LDLM_INTENT;
5382 it_handler = &mdt_intent_glimpse;
5385 it_format = &RQF_LDLM_INTENT;
5386 it_handler = &mdt_intent_brw;
5388 case IT_QUOTA_DQACQ:
5389 case IT_QUOTA_CONN: {
5390 struct lu_device *qmt = info->mti_mdt->mdt_qmt_dev;
5393 RETURN(-EOPNOTSUPP);
5395 if (mdt_rdonly(req->rq_export))
5398 (*lockp)->l_lvb_type = LVB_T_LQUOTA;
5399 /* pass the request to quota master */
5400 rc = qmt_hdls.qmth_intent_policy(info->mti_env, qmt,
5401 mdt_info_req(info), lockp,
5406 CERROR("%s: unknown intent code %#x\n",
5407 mdt_obd_name(info->mti_mdt), it_opc);
5411 if (!info->mti_batch_env)
5412 req_capsule_extend(pill, it_format);
5414 rc = mdt_unpack_req_pack_rep(info, it_handler_flags);
5418 if (unlikely(info->mti_object == NULL && check_mdt_object))
5421 if (it_handler_flags & IS_MUTABLE && mdt_rdonly(req->rq_export))
5424 CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_INTENT_DELAY, 10);
5426 /* execute policy */
5427 rc = (*it_handler)(it_opc, info, lockp, flags);
5429 /* Check whether the reply has been packed successfully. */
5430 if (info->mti_batch_env || req->rq_repmsg != NULL) {
5431 rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
5432 rep->lock_policy_res2 =
5433 ptlrpc_status_hton(rep->lock_policy_res2);
5439 static void mdt_ptlrpc_stats_update(struct ptlrpc_request *req,
5440 enum ldlm_intent_flags it_opc)
5442 struct lprocfs_stats *srv_stats = ptlrpc_req2svc(req)->srv_stats;
5444 /* update stats when IT code is known */
5445 if (srv_stats != NULL)
5446 lprocfs_counter_incr(srv_stats,
5447 PTLRPC_LAST_CNTR + (it_opc == IT_GLIMPSE ?
5448 LDLM_GLIMPSE_ENQUEUE : LDLM_IBITS_ENQUEUE));
5451 static int mdt_intent_policy(const struct lu_env *env,
5452 struct ldlm_namespace *ns,
5453 struct ldlm_lock **lockp,
5455 enum ldlm_mode mode,
5456 __u64 flags, void *data)
5458 struct tgt_session_info *tsi;
5459 struct mdt_thread_info *info;
5460 struct ptlrpc_request *req = req_cookie;
5461 struct ldlm_intent *it;
5462 struct req_capsule *pill;
5463 const struct ldlm_lock_desc *ldesc;
5468 LASSERT(req != NULL);
5470 tsi = tgt_ses_info(env);
5472 info = mdt_th_info(env);
5473 LASSERT(info != NULL);
5475 /* Check whether it is a sub request processing in a batch request */
5476 if (info->mti_batch_env) {
5477 pill = info->mti_pill;
5478 LASSERT(pill == &info->mti_sub_pill);
5480 info = tsi2mdt_info(tsi);
5481 pill = info->mti_pill;
5484 LASSERT(pill->rc_req == req);
5485 ldesc = &info->mti_dlm_req->lock_desc;
5487 if (info->mti_batch_env ||
5488 req->rq_reqmsg->lm_bufcount > DLM_INTENT_IT_OFF) {
5490 * For batch processing environment, the request format has
5493 if (!info->mti_batch_env)
5494 req_capsule_extend(pill, &RQF_LDLM_INTENT_BASIC);
5496 it = req_capsule_client_get(pill, &RMF_LDLM_INTENT);
5498 mdt_ptlrpc_stats_update(req, it->opc);
5499 info->mti_intent_lock = 1;
5501 * For intent lock request with policy, the ELC locks
5502 * have been cancelled in ldlm_handle_enqueue0().
5503 * Thus set @mti_dlm_req with null here.
5505 info->mti_dlm_req = NULL;
5506 rc = mdt_intent_opc(it->opc, info, lockp, flags);
5510 /* A lock without inodebits makes no sense and will oops
5511 * later in ldlm. Check now whether the ibits were
5512 * corrupted somewhere in mdt_intent_opc().
5513 * The case where the client fails to set ibits has
5514 * already been handled elsewhere.
5516 LASSERT(ergo(ldesc->l_resource.lr_type == LDLM_IBITS,
5517 ldesc->l_policy_data.l_inodebits.bits != 0));
5519 rc = err_serious(-EFAULT);
5521 } else if (ldesc->l_resource.lr_type == LDLM_IBITS &&
5522 ldesc->l_policy_data.l_inodebits.bits == MDS_INODELOCK_DOM) {
5523 struct ldlm_reply *rep;
5525 /* No intent was provided but the INTENT flag is set along with
5526 * the DOM bit; this is treated as a GLIMPSE request.
5527 * This logic is common to MDT and OST glimpse
5529 mdt_ptlrpc_stats_update(req, IT_GLIMPSE);
5530 rc = mdt_glimpse_enqueue(info, ns, lockp, flags);
5531 /* Check whether the reply has been packed successfully. */
5532 if (req->rq_repmsg != NULL) {
5533 rep = req_capsule_server_get(info->mti_pill,
5535 rep->lock_policy_res2 =
5536 ptlrpc_status_hton(rep->lock_policy_res2);
5539 /* No intent was provided */
5540 req_capsule_set_size(pill, &RMF_DLM_LVB, RCL_SERVER, 0);
5541 rc = req_capsule_server_pack(pill);
5543 rc = err_serious(rc);
5546 if (!info->mti_batch_env)
5547 mdt_thread_info_fini(info);
5551 static void mdt_deregister_seq_exp(struct mdt_device *mdt)
5553 struct seq_server_site *ss = mdt_seq_site(mdt);
5555 if (ss->ss_node_id == 0)
5558 if (ss->ss_client_seq != NULL) {
5559 lustre_deregister_lwp_item(&ss->ss_client_seq->lcs_exp);
5560 ss->ss_client_seq->lcs_exp = NULL;
5563 if (ss->ss_server_fld != NULL) {
5564 lustre_deregister_lwp_item(&ss->ss_server_fld->lsf_control_exp);
5565 ss->ss_server_fld->lsf_control_exp = NULL;
5569 static void mdt_seq_fini_cli(struct mdt_device *mdt)
5571 struct seq_server_site *ss = mdt_seq_site(mdt);
5576 if (ss->ss_server_seq != NULL)
5577 seq_server_set_cli(NULL, ss->ss_server_seq, NULL);
5580 static int mdt_seq_fini(const struct lu_env *env, struct mdt_device *mdt)
5582 mdt_seq_fini_cli(mdt);
5583 mdt_deregister_seq_exp(mdt);
5585 return seq_site_fini(env, mdt_seq_site(mdt));
5589 * It will retrieve its FLDB entries from MDT0. This only happens
5590 * when upgrading an existing FS to 2.6, or when the local FLDB is
5591 * corrupted and needs to be refreshed from MDT0.
5593 static int mdt_register_lwp_callback(void *data)
5596 struct mdt_device *mdt = data;
5597 struct lu_server_fld *fld = mdt_seq_site(mdt)->ss_server_fld;
5602 LASSERT(mdt_seq_site(mdt)->ss_node_id != 0);
5604 rc = lu_env_init(&env, LCT_MD_THREAD);
5606 CERROR("%s: cannot init env: rc = %d\n", mdt_obd_name(mdt), rc);
5610 /* Allocate a new sequence now to avoid creating a local transaction
5611 * in the normal transaction process
5613 rc = seq_server_check_and_alloc_super(&env,
5614 mdt_seq_site(mdt)->ss_server_seq);
5619 rc = fld_update_from_controller(&env, fld);
5621 CERROR("%s: cannot update controller: rc = %d\n",
5622 mdt_obd_name(mdt), rc);
5631 static int mdt_register_seq_exp(struct mdt_device *mdt)
5633 struct seq_server_site *ss = mdt_seq_site(mdt);
5634 char *lwp_name = NULL;
5637 if (ss->ss_node_id == 0)
5640 OBD_ALLOC(lwp_name, MAX_OBD_NAME);
5641 if (lwp_name == NULL)
5642 GOTO(out_free, rc = -ENOMEM);
5644 rc = tgt_name2lwp_name(mdt_obd_name(mdt), lwp_name, MAX_OBD_NAME, 0);
5648 rc = lustre_register_lwp_item(lwp_name, &ss->ss_client_seq->lcs_exp,
5653 rc = lustre_register_lwp_item(lwp_name,
5654 &ss->ss_server_fld->lsf_control_exp,
5655 mdt_register_lwp_callback, mdt);
5657 lustre_deregister_lwp_item(&ss->ss_client_seq->lcs_exp);
5658 ss->ss_client_seq->lcs_exp = NULL;
5662 OBD_FREE(lwp_name, MAX_OBD_NAME);
5668 * Init the client sequence manager, which is used by the local MDS to talk
5669 * to the sequence controller on a remote node.
5671 static int mdt_seq_init_cli(const struct lu_env *env, struct mdt_device *mdt)
5673 struct seq_server_site *ss = mdt_seq_site(mdt);
5678 /* check if this is adding the first MDC and the controller is not yet
 * initialized
5681 OBD_ALLOC_PTR(ss->ss_client_seq);
5682 if (ss->ss_client_seq == NULL)
5685 OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
5686 if (prefix == NULL) {
5687 OBD_FREE_PTR(ss->ss_client_seq);
5688 ss->ss_client_seq = NULL;
5692 /* Note: seq_client_fini will be called in seq_site_fini */
5693 snprintf(prefix, MAX_OBD_NAME + 5, "ctl-%s", mdt_obd_name(mdt));
5694 seq_client_init(ss->ss_client_seq, NULL, LUSTRE_SEQ_METADATA,
5695 prefix, ss->ss_node_id == 0 ? ss->ss_control_seq :
5697 OBD_FREE(prefix, MAX_OBD_NAME + 5);
5699 RETURN(seq_server_set_cli(env, ss->ss_server_seq, ss->ss_client_seq));
5702 static int mdt_seq_init(const struct lu_env *env, struct mdt_device *mdt)
5704 struct seq_server_site *ss;
5709 ss = mdt_seq_site(mdt);
5710 /* init sequence controller server(MDT0) */
5711 if (ss->ss_node_id == 0) {
5712 OBD_ALLOC_PTR(ss->ss_control_seq);
5713 if (ss->ss_control_seq == NULL)
5716 rc = seq_server_init(env, ss->ss_control_seq, mdt->mdt_bottom,
5717 mdt_obd_name(mdt), LUSTRE_SEQ_CONTROLLER,
5720 GOTO(out_seq_fini, rc);
5723 /* Init normal sequence server */
5724 OBD_ALLOC_PTR(ss->ss_server_seq);
5725 if (ss->ss_server_seq == NULL)
5726 GOTO(out_seq_fini, rc = -ENOMEM);
5728 rc = seq_server_init(env, ss->ss_server_seq, mdt->mdt_bottom,
5729 mdt_obd_name(mdt), LUSTRE_SEQ_SERVER, ss, true);
5731 GOTO(out_seq_fini, rc);
5733 /* init seq client for seq server to talk to seq controller(MDT0) */
5734 rc = mdt_seq_init_cli(env, mdt);
5736 GOTO(out_seq_fini, rc);
5738 if (ss->ss_node_id != 0)
5739 /* register controller export through lwp */
5740 rc = mdt_register_seq_exp(mdt);
5745 mdt_seq_fini(env, mdt);
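/*
 * Editorial summary of mdt_seq_init(): MDT0 additionally runs the
 * sequence controller; every MDT runs a normal sequence server plus a
 * "ctl-" seq client pointing at the controller; non-MDT0 nodes finally
 * register the controller export through LWP.
 */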
5753 static int mdt_fld_fini(const struct lu_env *env,
5754 struct mdt_device *m)
5756 struct seq_server_site *ss = mdt_seq_site(m);
5760 if (ss && ss->ss_server_fld) {
5761 fld_server_fini(env, ss->ss_server_fld);
5762 OBD_FREE_PTR(ss->ss_server_fld);
5763 ss->ss_server_fld = NULL;
5769 static int mdt_fld_init(const struct lu_env *env,
5771 struct mdt_device *m)
5773 struct seq_server_site *ss;
5778 ss = mdt_seq_site(m);
5780 OBD_ALLOC_PTR(ss->ss_server_fld);
5781 if (ss->ss_server_fld == NULL)
5782 RETURN(rc = -ENOMEM);
5784 rc = fld_server_init(env, ss->ss_server_fld, m->mdt_bottom, uuid,
5787 OBD_FREE_PTR(ss->ss_server_fld);
5788 ss->ss_server_fld = NULL;
5795 static void mdt_stack_pre_fini(const struct lu_env *env,
5796 struct mdt_device *m, struct lu_device *top)
5798 struct lustre_cfg_bufs *bufs;
5799 struct lustre_cfg *lcfg;
5800 struct mdt_thread_info *info;
5806 info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
5807 LASSERT(info != NULL);
5809 bufs = &info->mti_u.bufs;
5811 LASSERT(m->mdt_child_exp);
5812 LASSERT(m->mdt_child_exp->exp_obd);
5814 /* process cleanup, pass mdt obd name to get obd umount flags */
5815 /* XXX: this is needed because all layers are referenced by
5816 * objects (some of them are pinned by osd, for example);
5817 * the proper solution should be a model where objects used
5818 * by osd only don't have mdt/mdd slices -bzzz
5820 lustre_cfg_bufs_reset(bufs, mdt_obd_name(m));
5821 lustre_cfg_bufs_set_string(bufs, 1, NULL);
5822 OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen));
5825 lustre_cfg_init(lcfg, LCFG_PRE_CLEANUP, bufs);
5827 top->ld_ops->ldo_process_config(env, top, lcfg);
5828 OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
5832 static void mdt_stack_fini(const struct lu_env *env,
5833 struct mdt_device *m, struct lu_device *top)
5835 struct obd_device *obd = mdt2obd_dev(m);
5836 struct lustre_cfg_bufs *bufs;
5837 struct lustre_cfg *lcfg;
5838 struct mdt_thread_info *info;
5843 info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
5844 LASSERT(info != NULL);
5846 lu_dev_del_linkage(top->ld_site, top);
5848 lu_site_purge(env, top->ld_site, -1);
5850 bufs = &info->mti_u.bufs;
5851 /* process cleanup, pass mdt obd name to get obd umount flags */
5852 /* another purpose is to let all layers to release their objects */
5853 lustre_cfg_bufs_reset(bufs, mdt_obd_name(m));
5858 lustre_cfg_bufs_set_string(bufs, 1, flags);
5859 OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen));
5862 lustre_cfg_init(lcfg, LCFG_CLEANUP, bufs);
5865 top->ld_ops->ldo_process_config(env, top, lcfg);
5866 OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
5868 lu_site_purge(env, top->ld_site, -1);
5870 m->mdt_child = NULL;
5871 m->mdt_bottom = NULL;
5873 obd_disconnect(m->mdt_child_exp);
5874 m->mdt_child_exp = NULL;
5876 obd_disconnect(m->mdt_bottom_exp);
5877 m->mdt_bottom_exp = NULL;
5880 static int mdt_connect_to_next(const struct lu_env *env, struct mdt_device *m,
5881 const char *next, struct obd_export **exp)
5883 struct obd_connect_data *data = NULL;
5884 struct obd_device *obd;
5889 OBD_ALLOC_PTR(data);
5891 GOTO(out, rc = -ENOMEM);
5893 obd = class_name2obd(next);
5895 CERROR("%s: can't locate next device: %s\n",
5896 mdt_obd_name(m), next);
5897 GOTO(out, rc = -ENOTCONN);
5900 data->ocd_connect_flags = OBD_CONNECT_VERSION;
5901 data->ocd_version = LUSTRE_VERSION_CODE;
5903 rc = obd_connect(NULL, exp, obd, &obd->obd_uuid, data, NULL);
5905 CERROR("%s: cannot connect to next dev %s (%d)\n",
5906 mdt_obd_name(m), next, rc);
5915 static int mdt_stack_init(const struct lu_env *env, struct mdt_device *mdt,
5916 struct lustre_cfg *cfg)
5918 char *dev = lustre_cfg_string(cfg, 0);
5919 int rc, name_size, uuid_size;
5920 char *name, *uuid, *p;
5921 struct lustre_cfg_bufs *bufs;
5922 struct lustre_cfg *lcfg;
5923 struct obd_device *obd;
5924 struct lustre_profile *lprof;
5925 struct lu_site *site;
5929 /* in 1.8 we had only one device in the stack - MDS.
5930 * 2.0 introduced MDT, MDD, OSD; MDT starts the others internally.
5931 * in 2.3 OSD is instantiated by obd_mount.c, so we need
5932 * to generate names and set up MDT and MDD. MDT will use the
5933 * generated name to connect to MDD. for MDD the next device
5934 * will be LOD, with its name taken from the so-called "profile",
5935 * which is generated by the mount_option line
5937 * 1.8 MGS generates config. commands like this:
5938 * #06 (104)mount_option 0: 1:lustre-MDT0000 2:lustre-mdtlov
5939 * #08 (120)setup 0:lustre-MDT0000 1:dev 2:type 3:lustre-MDT0000
5940 * 2.0 MGS generates config. commands like this:
5941 * #07 (112)mount_option 0: 1:lustre-MDT0000 2:lustre-MDT0000-mdtlov
5942 * #08 (160)setup 0:lustre-MDT0000 1:lustre-MDT0000_UUID 2:0
5943 * 3:lustre-MDT0000-mdtlov 4:f
5945 * we generate the MDD name from the MDT one, just replacing T with D
5947 * after all the preparations, the logical equivalent will be
5948 * #01 (160)setup 0:lustre-MDD0000 1:lustre-MDD0000_UUID 2:0
5949 * 3:lustre-MDT0000-mdtlov 4:f
5950 * #02 (160)setup 0:lustre-MDT0000 1:lustre-MDT0000_UUID 2:0
5951 * 3:lustre-MDD0000 4:f
5953 * notice we build the stack bottom-up: MDD first, then MDT
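 *
 * (editorial illustration) e.g. "lustre-MDT0000" yields the MDD device
 * "lustre-MDD0000" with UUID "lustre-MDD0000_UUID"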
5956 name_size = MAX_OBD_NAME;
5957 uuid_size = MAX_OBD_NAME;
5959 OBD_ALLOC(name, name_size);
5960 OBD_ALLOC(uuid, uuid_size);
5961 if (name == NULL || uuid == NULL)
5962 GOTO(cleanup_mem, rc = -ENOMEM);
5964 OBD_ALLOC_PTR(bufs);
5966 GOTO(cleanup_mem, rc = -ENOMEM);
5969 p = strstr(name, "-MDT");
5971 GOTO(free_bufs, rc = -ENOMEM);
5974 snprintf(uuid, MAX_OBD_NAME, "%s_UUID", name);
5976 lprof = class_get_profile(lustre_cfg_string(cfg, 0));
5977 if (lprof == NULL || lprof->lp_dt == NULL) {
5978 CERROR("can't find the profile: %s\n",
5979 lustre_cfg_string(cfg, 0));
5980 GOTO(free_bufs, rc = -EINVAL);
5983 lustre_cfg_bufs_reset(bufs, name);
5984 lustre_cfg_bufs_set_string(bufs, 1, LUSTRE_MDD_NAME);
5985 lustre_cfg_bufs_set_string(bufs, 2, uuid);
5986 lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt);
5988 OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen));
5990 GOTO(put_profile, rc = -ENOMEM);
5991 lustre_cfg_init(lcfg, LCFG_ATTACH, bufs);
5993 rc = class_attach(lcfg);
5995 GOTO(lcfg_cleanup, rc);
5997 obd = class_name2obd(name);
5999 CERROR("Can not find obd %s (%s in config)\n",
6000 MDD_OBD_NAME, lustre_cfg_string(cfg, 0));
6001 GOTO(lcfg_cleanup, rc = -EINVAL);
6004 OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
6006 lustre_cfg_bufs_reset(bufs, name);
6007 lustre_cfg_bufs_set_string(bufs, 1, uuid);
6008 lustre_cfg_bufs_set_string(bufs, 2, dev);
6009 lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt);
6011 OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen));
6013 GOTO(class_detach, rc = -ENOMEM);
6014 lustre_cfg_init(lcfg, LCFG_SETUP, bufs);
6016 rc = class_setup(obd, lcfg);
6018 GOTO(class_detach, rc);
6020 /* connect to MDD we just setup */
6021 rc = mdt_connect_to_next(env, mdt, name, &mdt->mdt_child_exp);
6023 GOTO(class_detach, rc);
6025 site = mdt->mdt_child_exp->exp_obd->obd_lu_dev->ld_site;
6027 LASSERT(mdt_lu_site(mdt) == NULL);
6028 mdt->mdt_lu_dev.ld_site = site;
6029 site->ls_top_dev = &mdt->mdt_lu_dev;
6030 mdt->mdt_child = lu2md_dev(mdt->mdt_child_exp->exp_obd->obd_lu_dev);
6032 /* now connect to bottom OSD */
6033 snprintf(name, MAX_OBD_NAME, "%s-osd", dev);
6034 rc = mdt_connect_to_next(env, mdt, name, &mdt->mdt_bottom_exp);
6036 GOTO(class_detach, rc);
6038 lu2dt_dev(mdt->mdt_bottom_exp->exp_obd->obd_lu_dev);
6040 rc = lu_env_refill((struct lu_env *)env);
6042 CERROR("Failure to refill session: '%d'\n", rc);
6044 lu_dev_add_linkage(site, &mdt->mdt_lu_dev);
6049 class_detach(obd, lcfg);
6051 OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
6053 class_put_profile(lprof);
6057 OBD_FREE(name, name_size);
6058 OBD_FREE(uuid, uuid_size);
6062 /* setup quota master target on MDT0 */
6063 static int mdt_quota_init(const struct lu_env *env, struct mdt_device *mdt,
6064 struct lustre_cfg *cfg)
6066 struct obd_device *obd;
6067 char *dev = lustre_cfg_string(cfg, 0);
6068 char *qmtname, *uuid, *p;
6069 struct lustre_cfg_bufs *bufs;
6070 struct lustre_cfg *lcfg;
6071 struct lustre_profile *lprof;
6072 struct obd_connect_data *data;
6077 LASSERT(mdt->mdt_qmt_exp == NULL);
6078 LASSERT(mdt->mdt_qmt_dev == NULL);
6080 /* quota master is on MDT0 only for now */
6081 if (mdt->mdt_seq_site.ss_node_id != 0)
6084 /* MGS generates config commands which look as follows:
6085 * #01 (160)setup 0:lustre-MDT0000 1:lustre-MDT0000_UUID 2:0
6086 * 3:lustre-MDT0000-mdtlov 4:f
6088 * We generate the QMT name from the MDT one, just replacing MD with QM
6089 * after all the preparations, the logical equivalent will be:
6090 * #01 (160)setup 0:lustre-QMT0000 1:lustre-QMT0000_UUID 2:0
6091 * 3:lustre-MDT0000-osd 4:f
6093 OBD_ALLOC(qmtname, MAX_OBD_NAME);
6094 OBD_ALLOC(uuid, UUID_MAX);
6095 OBD_ALLOC_PTR(bufs);
6096 OBD_ALLOC_PTR(data);
6097 if (qmtname == NULL || uuid == NULL || bufs == NULL || data == NULL)
6098 GOTO(cleanup_mem, rc = -ENOMEM);
6100 strcpy(qmtname, dev);
6101 p = strstr(qmtname, "-MDT");
6103 GOTO(cleanup_mem, rc = -ENOMEM);
6104 /* replace MD with QM */
	p[1] = 'Q';
	p[2] = 'M';
6108 snprintf(uuid, UUID_MAX, "%s_UUID", qmtname);
6110 lprof = class_get_profile(lustre_cfg_string(cfg, 0));
6111 if (lprof == NULL || lprof->lp_dt == NULL) {
6112 CERROR("can't find profile for %s\n",
6113 lustre_cfg_string(cfg, 0));
6114 GOTO(cleanup_mem, rc = -EINVAL);
6117 lustre_cfg_bufs_reset(bufs, qmtname);
6118 lustre_cfg_bufs_set_string(bufs, 1, LUSTRE_QMT_NAME);
6119 lustre_cfg_bufs_set_string(bufs, 2, uuid);
6120 lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt);
6122 OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen));
6124 GOTO(put_profile, rc = -ENOMEM);
6125 lustre_cfg_init(lcfg, LCFG_ATTACH, bufs);
6127 rc = class_attach(lcfg);
6129 GOTO(lcfg_cleanup, rc);
6131 obd = class_name2obd(qmtname);
6133 CERROR("Can not find obd %s (%s in config)\n", qmtname,
6134 lustre_cfg_string(cfg, 0));
6135 GOTO(lcfg_cleanup, rc = -EINVAL);
6138 OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
6140 lustre_cfg_bufs_reset(bufs, qmtname);
6141 lustre_cfg_bufs_set_string(bufs, 1, uuid);
6142 lustre_cfg_bufs_set_string(bufs, 2, dev);
6144 /* for quota, the next device should be the OSD device */
6145 lustre_cfg_bufs_set_string(bufs, 3,
6146 mdt->mdt_bottom->dd_lu_dev.ld_obd->obd_name);
6148 OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen));
6150 GOTO(class_detach, rc = -ENOMEM);
6151 lustre_cfg_init(lcfg, LCFG_SETUP, bufs);
6153 rc = class_setup(obd, lcfg);
6155 GOTO(class_detach, rc);
6157 mdt->mdt_qmt_dev = obd->obd_lu_dev;
6159 /* configure local quota objects */
6160 if (CFS_FAIL_CHECK(OBD_FAIL_QUOTA_INIT))
6163 rc = mdt->mdt_qmt_dev->ld_ops->ldo_prepare(env,
6167 GOTO(class_cleanup, rc);
6169 /* connect to quota master target */
6170 data->ocd_connect_flags = OBD_CONNECT_VERSION;
6171 data->ocd_version = LUSTRE_VERSION_CODE;
6172 rc = obd_connect(NULL, &mdt->mdt_qmt_exp, obd, &obd->obd_uuid,
6175 CERROR("cannot connect to quota master device %s (%d)\n",
6177 GOTO(class_cleanup, rc);
6183 class_manual_cleanup(obd);
6184 mdt->mdt_qmt_dev = NULL;
6185 GOTO(lcfg_cleanup, rc);
6189 class_detach(obd, lcfg);
6191 OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
6193 class_put_profile(lprof);
6196 OBD_FREE(qmtname, MAX_OBD_NAME);
6197 OBD_FREE(uuid, UUID_MAX);
6202 /* Shutdown quota master target associated with mdt */
6203 static void mdt_quota_fini(const struct lu_env *env, struct mdt_device *mdt)
6207 if (mdt->mdt_qmt_exp == NULL)
6209 LASSERT(mdt->mdt_qmt_dev != NULL);
6211 /* the qmt automatically shuts down when the mdt disconnects */
6212 obd_disconnect(mdt->mdt_qmt_exp);
6213 mdt->mdt_qmt_exp = NULL;
6214 mdt->mdt_qmt_dev = NULL;
6218 /* mdt_getxattr() is used from mdt_intent_getxattr(), use this wrapper
6219 * for now. This will be removed along with converting the rest of the MDT
6220 * code to use tgt_session_info
6222 static int mdt_tgt_getxattr(struct tgt_session_info *tsi)
6224 struct mdt_thread_info *info = tsi2mdt_info(tsi);
6227 if (unlikely(info->mti_object == NULL))
6230 rc = mdt_getxattr(info);
6232 mdt_thread_info_fini(info);
6236 static int mdt_llog_open(struct tgt_session_info *tsi)
6240 if (!mdt_changelog_allow(tsi2mdt_info(tsi)))
6241 RETURN(err_serious(-EACCES));
6243 RETURN(tgt_llog_open(tsi));
6246 #define OBD_FAIL_OST_READ_NET OBD_FAIL_OST_BRW_NET
6247 #define OBD_FAIL_OST_WRITE_NET OBD_FAIL_OST_BRW_NET
6248 #define OST_BRW_READ OST_READ
6249 #define OST_BRW_WRITE OST_WRITE
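/*
 * Editorial note: the aliases above let the MDT reuse the generic OST
 * fail-lock names and bulk opcodes for its own I/O service (Data-on-MDT);
 * see mdt_io_ops[] below, where tgt_brw_read()/tgt_brw_write() are wired
 * to OST_BRW_READ/OST_BRW_WRITE.
 */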
6251 static struct tgt_handler mdt_tgt_handlers[] = {
6252 TGT_RPC_HANDLER(MDS_FIRST_OPC,
6253 0, MDS_CONNECT, mdt_tgt_connect,
6254 &RQF_CONNECT, LUSTRE_OBD_VERSION),
6255 TGT_RPC_HANDLER(MDS_FIRST_OPC,
6256 0, MDS_DISCONNECT, tgt_disconnect,
6257 &RQF_MDS_DISCONNECT, LUSTRE_OBD_VERSION),
6258 TGT_RPC_HANDLER(MDS_FIRST_OPC,
6259 HAS_REPLY, MDS_SET_INFO, mdt_set_info,
6260 &RQF_MDT_SET_INFO, LUSTRE_MDS_VERSION),
6261 TGT_MDT_HDL(0, MDS_GET_INFO, mdt_get_info),
6262 TGT_MDT_HDL(HAS_REPLY, MDS_GET_ROOT, mdt_get_root),
6263 TGT_MDT_HDL(HAS_BODY, MDS_GETATTR, mdt_getattr),
6264 TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_GETATTR_NAME,
6266 TGT_MDT_HDL(HAS_BODY, MDS_GETXATTR, mdt_tgt_getxattr),
6267 TGT_MDT_HDL(HAS_REPLY, MDS_STATFS, mdt_statfs),
6268 TGT_MDT_HDL(IS_MUTABLE, MDS_REINT, mdt_reint),
6269 TGT_MDT_HDL(HAS_BODY, MDS_CLOSE, mdt_close),
6270 TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_READPAGE, mdt_readpage),
6271 TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_SYNC, mdt_sync),
6272 TGT_MDT_HDL(0, MDS_QUOTACTL, mdt_quotactl),
6273 TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_PROGRESS,
6275 TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_CT_REGISTER,
6276 mdt_hsm_ct_register),
6277 TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_CT_UNREGISTER,
6278 mdt_hsm_ct_unregister),
6279 TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_HSM_STATE_GET,
6281 TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_STATE_SET,
6283 TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_HSM_ACTION, mdt_hsm_action),
6284 TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_HSM_REQUEST,
6286 TGT_MDT_HDL(HAS_KEY | HAS_BODY | HAS_REPLY | IS_MUTABLE,
6289 TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_DATA_VERSION,
6290 mdt_hsm_data_version),
6291 TGT_MDT_HDL(IS_MUTABLE, MDS_RMFID, mdt_rmfid),
6292 TGT_MDT_HDL(IS_MUTABLE, MDS_BATCH, mdt_batch),
6295 static struct tgt_handler mdt_io_ops[] = {
6296 TGT_OST_HDL_HP(HAS_BODY | HAS_REPLY, OST_BRW_READ, tgt_brw_read,
6298 TGT_OST_HDL_HP(HAS_BODY | IS_MUTABLE, OST_BRW_WRITE, tgt_brw_write,
6300 TGT_OST_HDL_HP(HAS_BODY | HAS_REPLY | IS_MUTABLE,
6301 OST_PUNCH, mdt_punch_hdl,
6303 TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SYNC, mdt_data_sync),
6304 TGT_OST_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, OST_FALLOCATE,
6306 TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SEEK, tgt_lseek),
6307 TGT_RPC_HANDLER(OST_FIRST_OPC,
6308 0, OST_SET_INFO, mdt_io_set_info,
6309 &RQF_OBD_SET_INFO, LUSTRE_OST_VERSION),
6312 static struct tgt_handler mdt_sec_ctx_ops[] = {
6313 TGT_SEC_HDL_VAR(0, SEC_CTX_INIT, mdt_sec_ctx_handle),
6314 TGT_SEC_HDL_VAR(0, SEC_CTX_INIT_CONT, mdt_sec_ctx_handle),
6315 TGT_SEC_HDL_VAR(0, SEC_CTX_FINI, mdt_sec_ctx_handle)
6318 static struct tgt_handler mdt_quota_ops[] = {
6319 TGT_QUOTA_HDL(HAS_REPLY, QUOTA_DQACQ, mdt_quota_dqacq),
6322 static struct tgt_handler mdt_llog_handlers[] = {
6323 TGT_LLOG_HDL(0, LLOG_ORIGIN_HANDLE_CREATE, mdt_llog_open),
6324 TGT_LLOG_HDL(0, LLOG_ORIGIN_HANDLE_NEXT_BLOCK, tgt_llog_next_block),
6325 TGT_LLOG_HDL(0, LLOG_ORIGIN_HANDLE_READ_HEADER, tgt_llog_read_header),
6326 TGT_LLOG_HDL(0, LLOG_ORIGIN_HANDLE_PREV_BLOCK, tgt_llog_prev_block),
6329 static struct tgt_opc_slice mdt_common_slice[] = {
6331 .tos_opc_start = MDS_FIRST_OPC,
6332 .tos_opc_end = MDS_LAST_OPC,
6333 .tos_hs = mdt_tgt_handlers
6336 .tos_opc_start = OBD_FIRST_OPC,
6337 .tos_opc_end = OBD_LAST_OPC,
6338 .tos_hs = tgt_obd_handlers
6341 .tos_opc_start = LDLM_FIRST_OPC,
6342 .tos_opc_end = LDLM_LAST_OPC,
6343 .tos_hs = tgt_dlm_handlers
6346 .tos_opc_start = SEC_FIRST_OPC,
6347 .tos_opc_end = SEC_LAST_OPC,
6348 .tos_hs = mdt_sec_ctx_ops
6351 .tos_opc_start = OUT_UPDATE_FIRST_OPC,
6352 .tos_opc_end = OUT_UPDATE_LAST_OPC,
6353 .tos_hs = tgt_out_handlers
6356 .tos_opc_start = FLD_FIRST_OPC,
6357 .tos_opc_end = FLD_LAST_OPC,
6358 .tos_hs = fld_handlers
6361 .tos_opc_start = SEQ_FIRST_OPC,
6362 .tos_opc_end = SEQ_LAST_OPC,
6363 .tos_hs = seq_handlers
6366 .tos_opc_start = QUOTA_DQACQ,
6367 .tos_opc_end = QUOTA_LAST_OPC,
6368 .tos_hs = mdt_quota_ops
6371 .tos_opc_start = LLOG_FIRST_OPC,
6372 .tos_opc_end = LLOG_LAST_OPC,
6373 .tos_hs = mdt_llog_handlers
6376 .tos_opc_start = LFSCK_FIRST_OPC,
6377 .tos_opc_end = LFSCK_LAST_OPC,
6378 .tos_hs = tgt_lfsck_handlers
6381 .tos_opc_start = OST_FIRST_OPC,
6382 .tos_opc_end = OST_LAST_OPC,
6383 .tos_hs = mdt_io_ops
6390 static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
6392 struct md_device *next = m->mdt_child;
6393 struct lu_device *d = &m->mdt_lu_dev;
6394 struct obd_device *obd = mdt2obd_dev(m);
6395 struct lfsck_stop stop;
6398 stop.ls_status = LS_PAUSED;
6400 next->md_ops->mdo_iocontrol(env, next, OBD_IOC_STOP_LFSCK, 0, &stop);
6402 mdt_stack_pre_fini(env, m, md2lu_dev(m->mdt_child));
6404 mdt_restriper_stop(m);
6405 ping_evictor_stop();
6407 /* Remove the HSM /proc entry so the coordinator cannot be
6408 * restarted by a user while it's shutting down.
6410 mdt_hsm_cdt_stop(m);
6412 mdt_llog_ctxt_unclone(env, m, LLOG_AGENT_ORIG_CTXT);
6413 mdt_llog_ctxt_unclone(env, m, LLOG_CHANGELOG_ORIG_CTXT);
6414 target_recovery_fini(obd);
6415 if (m->mdt_namespace != NULL)
6416 ldlm_namespace_free_prior(m->mdt_namespace, NULL,
6417 d->ld_obd->obd_force);
6418 mdt_quota_fini(env, m);
6420 obd_exports_barrier(obd);
6421 obd_zombie_barrier();
6423 cfs_free_nidlist(&m->mdt_squash.rsi_nosquash_nids);
6425 /* Calling the cleanup functions in the same order as in the mdt_init0
6428 mdt_tunables_fini(m);
6429 upcall_cache_cleanup(m->mdt_identity_cache);
6430 m->mdt_identity_cache = NULL;
6431 upcall_cache_cleanup(m->mdt_identity_cache_int);
6432 m->mdt_identity_cache_int = NULL;
6434 tgt_fini(env, &m->mdt_lut);
6436 mdt_hsm_cdt_fini(m);
6438 if (m->mdt_los != NULL) {
6439 local_oid_storage_fini(env, m->mdt_los);
6443 if (m->mdt_namespace != NULL) {
6444 ldlm_namespace_free_post(m->mdt_namespace);
6445 d->ld_obd->obd_namespace = m->mdt_namespace = NULL;
6448 if (m->mdt_md_root != NULL) {
6449 mdt_object_put(env, m->mdt_md_root);
6450 m->mdt_md_root = NULL;
6453 mdt_seq_fini(env, m);
6455 mdt_fld_fini(env, m);
6460 mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
6462 LASSERT(atomic_read(&d->ld_ref) == 0);
6464 server_put_mount(mdt_obd_name(m), true);
6469 static int mdt_postrecov(const struct lu_env *, struct mdt_device *);
6471 static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
6472 struct lu_device_type *ldt, struct lustre_cfg *cfg)
6474 const struct dt_device_param *dt_conf;
6475 struct mdt_thread_info *info;
6476 struct obd_device *obd;
6477 const char *dev = lustre_cfg_string(cfg, 0);
6478 const char *num = lustre_cfg_string(cfg, 2);
6479 struct tg_grants_data *tgd = &m->mdt_lut.lut_tgd;
6480 struct lustre_mount_info *lmi = NULL;
6481 struct lustre_sb_info *lsi;
6483 struct seq_server_site *ss_site;
6484 const char *identity_upcall = "NONE";
6485 char cache_internal[NAME_MAX + 1] = { 0 };
6486 struct md_device *next;
6494 lu_device_init(&m->mdt_lu_dev, ldt);
6496 * Environment (env) might be missing mdt_thread_key values at this
6497 * point, if the device is allocated while mdt_thread_key is QUIESCENT.
6500 * Usually the device allocation path doesn't use module key values,
6501 * but mdt has to do a lot of work here, so allocate the key value.
6503 rc = lu_env_refill((struct lu_env *)env);
6507 info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
6508 LASSERT(info != NULL);
6510 obd = class_name2obd(dev);
6511 LASSERT(obd != NULL);
6513 m->mdt_max_mdsize = MAX_MD_SIZE_OLD;
6514 m->mdt_evict_tgt_nids = 1;
6515 m->mdt_opts.mo_cos = MDT_COS_DEFAULT;
6517 lmi = server_get_mount(dev);
6519 CERROR("Cannot get mount info for %s!\n", dev);
6522 lsi = s2lsi(lmi->lmi_sb);
6523 LASSERT(lsi->lsi_lmd);
6524 /* CMD is supported only in IAM mode */
6526 rc = kstrtol(num, 10, &node_id);
6531 if (test_bit(LMD_FLG_SKIP_LFSCK, lsi->lsi_lmd->lmd_flags))
6532 m->mdt_skip_lfsck = 1;
6533 if (test_bit(LMD_FLG_NO_CREATE, lsi->lsi_lmd->lmd_flags))
6534 m->mdt_lut.lut_no_create = 1;
6537 /* Just try to get a DoM lock by default. Otherwise, with a group
6538 * lock already granted, the request may be blocked for a long time.
6540 m->mdt_opts.mo_dom_lock = TRYLOCK_DOM_ON_OPEN;
6541 /* DoM files are read at open and data is packed in the reply */
6542 m->mdt_dom_read_open = 1;
6544 m->mdt_squash.rsi_uid = 0;
6545 m->mdt_squash.rsi_gid = 0;
6546 INIT_LIST_HEAD(&m->mdt_squash.rsi_nosquash_nids);
6547 spin_lock_init(&m->mdt_squash.rsi_lock);
6548 spin_lock_init(&m->mdt_lock);
6549 m->mdt_enable_chprojid_gid = 0;
6550 m->mdt_enable_dir_migration = 1;
6551 m->mdt_enable_dir_restripe = 0;
6552 m->mdt_enable_dir_auto_split = 0;
6553 m->mdt_enable_parallel_rename_dir = 1;
6554 m->mdt_enable_parallel_rename_file = 1;
6555 m->mdt_enable_parallel_rename_crossdir = 1;
6556 m->mdt_enable_remote_dir = 1;
6557 m->mdt_enable_remote_dir_gid = 0;
6558 m->mdt_enable_pin_gid = 0;
6559 m->mdt_enable_remote_rename = 1;
6560 m->mdt_enable_rename_trylock = 1;
6561 m->mdt_enable_striped_dir = 1;
6562 m->mdt_enable_dmv_implicit_inherit = 1;
6563 m->mdt_dir_restripe_nsonly = 1;
6564 m->mdt_max_mod_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
6566 atomic_set(&m->mdt_mds_mds_conns, 0);
6567 atomic_set(&m->mdt_async_commit_count, 0);
6568 atomic_set(&m->mdt_dmv_old_client_count, 0);
6570 m->mdt_lu_dev.ld_ops = &mdt_lu_ops;
6571 m->mdt_lu_dev.ld_obd = obd;
6572 /* Set this lu_device to obd for error handling purposes. */
6573 obd->obd_lu_dev = &m->mdt_lu_dev;
6575 strncpy(m->mdt_job_xattr, XATTR_NAME_JOB_DEFAULT, XATTR_JOB_MAX_LEN);
6577 /* init the stack */
6578 rc = mdt_stack_init((struct lu_env *)env, m, cfg);
6580 CERROR("%s: Can't init device stack, rc %d\n",
6581 mdt_obd_name(m), rc);
6586 ss_site = mdt_seq_site(m);
6587 s->ld_seq_site = ss_site;
6590 /* set server index */
6591 ss_site->ss_node_id = node_id;
6593 /* failover is the default
6594 * FIXME: we do not fail out mds0/mgs, which may cause some problems;
6595 * mds0 is assumed to be the one whose ss_node_id == 0 XXX
6597 obd->obd_replayable = 1;
6598 /* No connections are accepted until configuration finishes */
6599 obd->obd_no_conn = 1;
6601 if (cfg->lcfg_bufcount > 4 && LUSTRE_CFG_BUFLEN(cfg, 4) > 0) {
6602 char *str = lustre_cfg_string(cfg, 4);
6604 if (strchr(str, 'n')) {
6605 CWARN("%s: recovery disabled\n", mdt_obd_name(m));
6606 obd->obd_replayable = 0;
6610 rc = mdt_fld_init(env, mdt_obd_name(m), m);
6612 GOTO(err_fini_stack, rc);
6614 rc = mdt_seq_init(env, m);
6616 GOTO(err_fini_fld, rc);
6618 snprintf(info->mti_u.ns_name, sizeof(info->mti_u.ns_name), "%s-%s",
6619 LUSTRE_MDT_NAME, obd->obd_uuid.uuid);
6620 m->mdt_namespace = ldlm_namespace_new(obd, info->mti_u.ns_name,
6621 LDLM_NAMESPACE_SERVER,
6622 LDLM_NAMESPACE_GREEDY,
6624 if (IS_ERR(m->mdt_namespace)) {
6625 rc = PTR_ERR(m->mdt_namespace);
6626 CERROR("%s: unable to create server namespace: rc = %d\n",
6628 m->mdt_namespace = NULL;
6629 GOTO(err_fini_seq, rc);
6632 m->mdt_namespace->ns_lvbp = m;
6633 m->mdt_namespace->ns_lvbo = &mdt_lvbo;
6635 ldlm_register_intent(m->mdt_namespace, mdt_intent_policy);
6636 /* set obd_namespace for compatibility with old code */
6637 obd->obd_namespace = m->mdt_namespace;
6639 rc = tgt_init(env, &m->mdt_lut, obd, m->mdt_bottom, mdt_common_slice,
6640 OBD_FAIL_MDS_ALL_REQUEST_NET,
6641 OBD_FAIL_MDS_ALL_REPLY_NET);
6643 GOTO(err_free_ns, rc);
6645 /* Amount of available space excluded from granting and reserved
6646 * for metadata. It is a percentage of the total MDT size.
6648 tgd->tgd_reserved_pcnt = 10;
6650 if (ONE_MB_BRW_SIZE < (1U << tgd->tgd_blockbits))
6651 m->mdt_brw_size = 1U << tgd->tgd_blockbits;
6653 m->mdt_brw_size = ONE_MB_BRW_SIZE;
6655 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_FS_SETUP))
6656 GOTO(err_tgt, rc = -ENOENT);
6658 fid.f_seq = FID_SEQ_LOCAL_NAME;
6659 fid.f_oid = 1;
6660 fid.f_ver = 0;
6661 rc = local_oid_storage_init(env, m->mdt_bottom, &fid, &m->mdt_los);
6662 if (rc != 0)
6663 GOTO(err_tgt, rc);
6665 rc = mdt_hsm_cdt_init(m);
6666 if (rc != 0) {
6667 CERROR("%s: error initializing coordinator, rc %d\n",
6668 mdt_obd_name(m), rc);
6669 GOTO(err_los_fini, rc);
6670 }
6672 tgt_adapt_sptlrpc_conf(&m->mdt_lut);
6674 next = m->mdt_child;
6675 dt_conf = next->md_ops->mdo_dtconf_get(env, next);
6677 mntopts = dt_conf->ddp_mntopts;
6679 if (mntopts & MNTOPT_USERXATTR)
6680 m->mdt_opts.mo_user_xattr = 1;
6681 else
6682 m->mdt_opts.mo_user_xattr = 0;
6684 m->mdt_max_ea_size = dt_conf->ddp_max_ea_size;
6686 if (mntopts & MNTOPT_ACL)
6687 m->mdt_opts.mo_acl = 1;
6688 else
6689 m->mdt_opts.mo_acl = 0;
6691 m->mdt_enable_strict_som = 1;
6693 /* XXX: to support suppgid for ACL, we enable identity_upcall
6694 * by default; otherwise we may get an unexpected -EACCES.
6695 */
6696 if (m->mdt_opts.mo_acl)
6697 identity_upcall = MDT_IDENTITY_UPCALL_PATH;
6698 m->mdt_identity_cache = upcall_cache_init(mdt_obd_name(m),
6699 identity_upcall,
6700 UC_IDCACHE_HASH_SIZE,
6701 1200, /* entry expire: 20 min */
6702 30, /* acquire expire: 30 s */
6703 true, /* acquire can replay */
6704 &mdt_identity_upcall_cache_ops);
6705 if (IS_ERR(m->mdt_identity_cache)) {
6706 rc = PTR_ERR(m->mdt_identity_cache);
6707 m->mdt_identity_cache = NULL;
6708 GOTO(err_free_hsm, rc);
6709 }
6711 snprintf(cache_internal, sizeof(cache_internal), "%s_int",
6712 mdt_obd_name(m));
6713 m->mdt_identity_cache_int = upcall_cache_init(cache_internal,
6714 IDENTITY_UPCALL_INTERNAL,
6715 UC_IDCACHE_HASH_SIZE,
6716 1200, /* entry expire: 20 min */
6717 30, /* acquire expire: 30 s */
6718 true, /* acquire can replay */
6719 &mdt_identity_upcall_cache_ops);
6720 if (IS_ERR(m->mdt_identity_cache_int)) {
6721 rc = PTR_ERR(m->mdt_identity_cache_int);
6722 m->mdt_identity_cache_int = NULL;
6723 GOTO(err_cache, rc);
6724 }
6726 rc = mdt_tunables_init(m, dev);
6727 if (rc) {
6728 CERROR("Can't init MDT lprocfs, rc %d\n", rc);
6729 GOTO(err_recovery, rc);
6730 }
6732 rc = mdt_quota_init(env, m, cfg);
6733 if (rc)
6734 GOTO(err_procfs, rc);
6736 m->mdt_ldlm_client = &mdt2obd_dev(m)->obd_ldlm_client;
6737 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
6738 "mdt_ldlm_client", m->mdt_ldlm_client);
6740 ping_evictor_start();
6742 /* recovery will be started upon mdt_prepare() when the whole stack is
6743 * complete and ready to serve the requests
6744 */
6746 /* Reduce the initial timeout on an MDS because it doesn't need such
6747 * a long timeout as an OST does. Adaptive timeouts will adjust this
6748 * value appropriately.
6749 */
6750 if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
6751 ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
6753 if (test_bit(LMD_FLG_LOCAL_RECOV, lsi->lsi_lmd->lmd_flags))
6754 m->mdt_lut.lut_local_recovery = 1;
6756 rc = mdt_restriper_start(m);
6757 if (rc)
6758 GOTO(err_ping_evictor, rc);
6760 RETURN(0);
6762 err_ping_evictor:
6763 ping_evictor_stop();
6764 err_procfs:
6765 mdt_tunables_fini(m);
6766 err_recovery:
6767 upcall_cache_cleanup(m->mdt_identity_cache_int);
6768 m->mdt_identity_cache_int = NULL;
6769 err_cache:
6770 upcall_cache_cleanup(m->mdt_identity_cache);
6771 m->mdt_identity_cache = NULL;
6772 err_free_hsm:
6773 mdt_hsm_cdt_fini(m);
6774 err_los_fini:
6775 local_oid_storage_fini(env, m->mdt_los);
6776 m->mdt_los = NULL;
6777 err_tgt:
6778 /* keep recoverable clients */
6779 obd->obd_fail = 1;
6780 target_recovery_fini(obd);
6781 obd_exports_barrier(obd);
6782 obd_zombie_barrier();
6783 tgt_fini(env, &m->mdt_lut);
6784 err_free_ns:
6785 ldlm_namespace_free(m->mdt_namespace, NULL, 0);
6786 obd->obd_namespace = m->mdt_namespace = NULL;
6787 err_fini_seq:
6788 mdt_seq_fini(env, m);
6789 err_fini_fld:
6790 mdt_fld_fini(env, m);
6791 err_fini_stack:
6792 mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
6793 err_lmi:
6794 if (lmi)
6795 server_put_mount(dev, true);
6796 RETURN(rc);
6797 }
6799 /* For interoperability, the left element is the old parameter name and
6800 * the right one is the new name; if a parameter is deprecated, the new
6801 * name is set to NULL.
6802 */
6803 static struct cfg_interop_param mdt_interop_param[] = {
6804 { "mdt.group_upcall", NULL },
6805 { "mdt.quota_type", NULL },
6806 { "mdd.quota_type", NULL },
6807 { "mdt.som", NULL },
6808 { "mdt.rootsquash", "mdt.root_squash" },
6809 { "mdt.nosquash_nid", "mdt.nosquash_nids" },
6813 /* used by MGS to process specific configurations */
6814 static int mdt_process_config(const struct lu_env *env,
6815 struct lu_device *d, struct lustre_cfg *cfg)
6816 {
6817 struct mdt_device *m = mdt_dev(d);
6818 struct md_device *md_next = m->mdt_child;
6819 struct lu_device *next = md2lu_dev(md_next);
6820 int rc;
6822 ENTRY;
6824 switch (cfg->lcfg_command) {
6825 case LCFG_PARAM: {
6826 struct obd_device *obd = d->ld_obd;
6827 /* For interoperability */
6828 struct cfg_interop_param *ptr = NULL;
6829 struct lustre_cfg *old_cfg = NULL;
6830 char *param = NULL;
6831 ssize_t count;
6833 param = lustre_cfg_string(cfg, 1);
6834 if (param == NULL) {
6835 CERROR("param is empty\n");
6836 rc = -EINVAL;
6837 break;
6838 }
6840 ptr = class_find_old_param(param, mdt_interop_param);
6841 if (ptr != NULL) {
6842 if (ptr->new_param == NULL) {
6843 rc = 0;
6844 CWARN("For interoperability, skip this %s. It is obsolete.\n",
6845 ptr->old_param);
6846 break;
6847 }
6849 CWARN("Found old param %s, changed it to %s.\n",
6850 ptr->old_param, ptr->new_param);
6852 old_cfg = cfg;
6853 cfg = lustre_cfg_rename(old_cfg, ptr->new_param);
6854 if (IS_ERR(cfg)) {
6855 rc = PTR_ERR(cfg);
6856 break;
6857 }
6858 }
6860 count = class_modify_config(cfg, PARAM_MDT,
6861 &obd->obd_kset.kobj);
6862 if (count < 0) {
6863 struct coordinator *cdt = &m->mdt_coordinator;
6865 /* is it an HSM var ? */
6866 count = class_modify_config(cfg, PARAM_HSM,
6867 &cdt->cdt_hsm_kobj);
6868 if (count < 0)
6869 /* we don't understand; pass it on */
6870 rc = next->ld_ops->ldo_process_config(env, next,
6871 cfg);
6872 else
6873 rc = count > 0 ? 0 : count;
6874 } else {
6875 rc = count > 0 ? 0 : count;
6876 }
6878 if (old_cfg)
6879 OBD_FREE(cfg, lustre_cfg_len(cfg->lcfg_bufcount,
6880 cfg->lcfg_buflens));
6881 break;
6882 }
6883 default:
6884 /* others are passed further */
6885 rc = next->ld_ops->ldo_process_config(env, next, cfg);
6886 break;
6887 }
6888 RETURN(rc);
6889 }
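/*
 * For reference, parameters reach this handler as records the MGS
 * distributes through the configuration log; an administrator would
 * typically create such a record with something like (hypothetical
 * filesystem name):
 *
 *   lctl conf_param testfs.mdt.root_squash=1000:1000
 *
 * The "mdt." prefix routes the record to the PARAM_MDT branch above,
 * while HSM settings take the PARAM_HSM branch.
 */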
6891 static struct lu_object *mdt_object_alloc(const struct lu_env *env,
6892 const struct lu_object_header *hdr,
6893 struct lu_device *d)
6894 {
6895 struct mdt_object *mo;
6897 ENTRY;
6899 OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, GFP_NOFS);
6900 if (mo != NULL) {
6901 struct lu_object *o;
6902 struct lu_object_header *h;
6904 o = &mo->mot_obj;
6905 h = &mo->mot_header;
6906 lu_object_header_init(h);
6907 lu_object_init(o, h, d);
6908 lu_object_add_top(h, o);
6909 o->lo_ops = &mdt_obj_ops;
6910 spin_lock_init(&mo->mot_write_lock);
6911 mutex_init(&mo->mot_som_mutex);
6912 mutex_init(&mo->mot_lov_mutex);
6913 init_rwsem(&mo->mot_dom_sem);
6914 init_rwsem(&mo->mot_open_sem);
6915 atomic_set(&mo->mot_open_count, 0);
6916 mo->mot_restripe_offset = 0;
6917 INIT_LIST_HEAD(&mo->mot_restripe_linkage);
6918 mo->mot_lsom_size = 0;
6919 mo->mot_lsom_blocks = 0;
6920 mo->mot_lsom_inited = false;
6921 mo->mot_discard_done = false;
6922 RETURN(o);
6923 }
6924 RETURN(NULL);
6925 }
6927 static int mdt_object_init(const struct lu_env *env, struct lu_object *o,
6928 const struct lu_object_conf *unused)
6929 {
6930 struct mdt_device *d = mdt_dev(o->lo_dev);
6931 struct lu_device *under;
6932 struct lu_object *below;
6933 int rc = 0;
6935 ENTRY;
6937 CDEBUG(D_INFO, "object init, fid = "DFID"\n",
6938 PFID(lu_object_fid(o)));
6940 under = &d->mdt_child->md_lu_dev;
6941 below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
6942 if (below != NULL)
6943 lu_object_add(o, below);
6944 else
6945 rc = -ENOMEM;
6947 RETURN(rc);
6948 }
6950 static void mdt_object_free_rcu(struct rcu_head *head)
6951 {
6952 struct mdt_object *mo = container_of(head, struct mdt_object,
6953 mot_header.loh_rcu);
6955 kmem_cache_free(mdt_object_kmem, mo);
6956 }
6958 static void mdt_object_free(const struct lu_env *env, struct lu_object *o)
6959 {
6960 struct mdt_object *mo = mdt_obj(o);
6961 struct lu_object_header *h;
6963 ENTRY;
6965 h = o->lo_header;
6966 CDEBUG(D_INFO, "object free, fid = "DFID"\n",
6967 PFID(lu_object_fid(o)));
6969 LASSERT(atomic_read(&mo->mot_open_count) == 0);
6970 LASSERT(atomic_read(&mo->mot_lease_count) == 0);
6972 lu_object_fini(o);
6973 lu_object_header_fini(h);
6974 OBD_FREE_PRE(mo, sizeof(*mo), "slab-freed");
6975 call_rcu(&mo->mot_header.loh_rcu, mdt_object_free_rcu);
6977 EXIT;
6978 }
6980 static int mdt_object_print(const struct lu_env *env, void *cookie,
6981 lu_printer_t p, const struct lu_object *o)
6982 {
6983 struct mdt_object *mdto = mdt_obj((struct lu_object *)o);
6985 return (*p)(env, cookie,
6986 LUSTRE_MDT_NAME"-object@%p(%s %s, writecount=%d)",
6987 mdto, mdto->mot_lov_created ? "lov_created" : "",
6988 mdto->mot_cache_attr ? "cache_attr" : "",
6989 mdto->mot_write_count);
6990 }
6992 static int mdt_prepare(const struct lu_env *env,
6993 struct lu_device *pdev,
6994 struct lu_device *cdev)
6995 {
6996 struct mdt_device *mdt = mdt_dev(cdev);
6997 struct lu_device *next = &mdt->mdt_child->md_lu_dev;
6998 struct obd_device *obd = cdev->ld_obd;
6999 int rc;
7001 ENTRY;
7003 LASSERT(obd != NULL);
7005 rc = next->ld_ops->ldo_prepare(env, cdev, next);
7006 if (rc)
7007 RETURN(rc);
7009 rc = mdt_llog_ctxt_clone(env, mdt, LLOG_CHANGELOG_ORIG_CTXT);
7010 if (rc)
7011 RETURN(rc);
7013 rc = mdt_llog_ctxt_clone(env, mdt, LLOG_AGENT_ORIG_CTXT);
7014 if (rc)
7015 RETURN(rc);
7017 rc = lfsck_register_namespace(env, mdt->mdt_bottom, mdt->mdt_namespace);
7018 /* The LFSCK instance is registered just now, so it must be present
7019 * when the namespace is registered with it.
7020 */
7021 LASSERTF(rc == 0, "register namespace failed: rc = %d\n", rc);
7023 if (mdt->mdt_seq_site.ss_node_id == 0) {
7024 rc = mdt->mdt_child->md_ops->mdo_root_get(env, mdt->mdt_child,
7025 &mdt->mdt_md_root_fid);
7026 if (rc)
7027 RETURN(rc);
7028 }
7030 LASSERT(!test_bit(MDT_FL_CFGLOG, &mdt->mdt_state));
7032 target_recovery_init(&mdt->mdt_lut, tgt_request_handle);
7033 set_bit(MDT_FL_CFGLOG, &mdt->mdt_state);
7034 LASSERT(obd->obd_no_conn);
7035 spin_lock(&obd->obd_dev_lock);
7036 obd->obd_no_conn = 0;
7037 spin_unlock(&obd->obd_dev_lock);
7039 if (!test_bit(OBDF_RECOVERING, obd->obd_flags))
7040 mdt_postrecov(env, mdt);
7042 RETURN(rc);
7043 }
7045 const struct lu_device_operations mdt_lu_ops = {
7046 .ldo_object_alloc = mdt_object_alloc,
7047 .ldo_process_config = mdt_process_config,
7048 .ldo_prepare = mdt_prepare,
7049 };
7051 static const struct lu_object_operations mdt_obj_ops = {
7052 .loo_object_init = mdt_object_init,
7053 .loo_object_free = mdt_object_free,
7054 .loo_object_print = mdt_object_print
7055 };
7057 static int mdt_obd_set_info_async(const struct lu_env *env,
7058 struct obd_export *exp,
7059 __u32 keylen, void *key,
7060 __u32 vallen, void *val,
7061 struct ptlrpc_request_set *set)
7062 {
7063 int rc;
7065 ENTRY;
7067 if (KEY_IS(KEY_SPTLRPC_CONF)) {
7068 rc = tgt_adapt_sptlrpc_conf(class_exp2tgt(exp));
7069 RETURN(rc);
7070 }
7072 RETURN(0);
7073 }
7075 static inline void mdt_enable_slc(struct mdt_device *mdt)
7076 {
7077 if (mdt->mdt_lut.lut_sync_lock_cancel == SYNC_LOCK_CANCEL_NEVER)
7078 mdt->mdt_lut.lut_sync_lock_cancel = SYNC_LOCK_CANCEL_BLOCKING;
7079 }
7081 static inline void mdt_disable_slc(struct mdt_device *mdt)
7082 {
7083 if (mdt->mdt_lut.lut_sync_lock_cancel == SYNC_LOCK_CANCEL_BLOCKING)
7084 mdt->mdt_lut.lut_sync_lock_cancel = SYNC_LOCK_CANCEL_NEVER;
7085 }
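/*
 * Sync-on-lock-cancel (SLC) note: these helpers are flipped as MDS-MDS
 * connections come and go (see the OBD_CONNECT_MDS_MDS handling in
 * mdt_connect_internal() and mdt_obd_disconnect() below), so blocking
 * lock cancels force a commit only while another MDT holds state here.
 * The manual override is believed to be the per-target sync_lock_cancel
 * tunable, e.g.:
 *
 *   lctl get_param mdt.*.sync_lock_cancel
 */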
7087 /**
7088 * Match client and server connection feature flags.
7090 * Compute the compatibility flags for a connection request based on
7091 * features mutually supported by client and server.
7093 * The obd_export::exp_connect_data.ocd_connect_flags field in \a exp
7094 * must not be updated here, otherwise a partially initialized value may
7095 * be exposed. After the connection request is successfully processed,
7096 * the top-level MDT connect request handler atomically updates the export
7097 * connect flags from the obd_connect_data::ocd_connect_flags field of the
7098 * reply. \see mdt_connect().
7100 * Before 2.7.50, clients sent a struct obd_connect_data_v1 rather than a
7101 * full struct obd_connect_data, so care must be taken when accessing fields
7102 * that are not present in struct obd_connect_data_v1. See LU-16.
7104 * \param exp the obd_export associated with this client/target pair
7105 * \param mdt the target device for the connection
7106 * \param data stores data for this connect request
7108 * \retval 0 success
7109 * \retval -EPROTO \a data unexpectedly has zero obd_connect_data::ocd_brw_size
7110 * \retval -EBADE client and server feature requirements are incompatible
7111 */
7112 static int mdt_connect_internal(const struct lu_env *env,
7113 struct obd_export *exp,
7114 struct mdt_device *mdt,
7115 struct obd_connect_data *data, bool reconnect)
7116 {
7117 const char *obd_name = mdt_obd_name(mdt);
7119 LASSERT(data != NULL);
7121 data->ocd_connect_flags &= MDT_CONNECT_SUPPORTED;
7123 if (mdt->mdt_bottom->dd_rdonly &&
7124 !(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) &&
7125 !(data->ocd_connect_flags & OBD_CONNECT_RDONLY))
7126 RETURN(-EACCES);
7128 if (data->ocd_connect_flags & OBD_CONNECT_FLAGS2)
7129 data->ocd_connect_flags2 &= MDT_CONNECT_SUPPORTED2;
7131 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
7133 if (!mdt->mdt_opts.mo_acl)
7134 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
7136 if (!mdt->mdt_opts.mo_user_xattr)
7137 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
7139 if (OCD_HAS_FLAG(data, BRW_SIZE)) {
7140 data->ocd_brw_size = min(data->ocd_brw_size,
7141 mdt->mdt_brw_size);
7142 if (data->ocd_brw_size == 0) {
7143 CERROR("%s: cli %s/%p ocd_connect_flags: %#llx ocd_version: %x ocd_grant: %d ocd_index: %u ocd_brw_size unexpectedly zero, network data corruption? Refusing to connect this client\n", obd_name, exp->exp_client_uuid.uuid,
7144 exp, data->ocd_connect_flags, data->ocd_version,
7145 data->ocd_grant, data->ocd_index);
7146 RETURN(-EPROTO);
7147 }
7148 }
7150 if (OCD_HAS_FLAG(data, GRANT_PARAM)) {
7151 struct dt_device_param *ddp = &mdt->mdt_lut.lut_dt_conf;
7153 /* client is reporting its page size, for future use */
7154 exp->exp_target_data.ted_pagebits = data->ocd_grant_blkbits;
7155 data->ocd_grant_blkbits = mdt->mdt_lut.lut_tgd.tgd_blockbits;
7156 /* ddp_inodespace may not be a power-of-two value, e.g. for ldiskfs
7157 * it's LDISKFS_DIR_REC_LEN(20) = 28.
7158 */
7159 data->ocd_grant_inobits = fls(ddp->ddp_inodespace - 1);
7160 /* ocd_grant_tax_kb is in 1K byte blocks */
7161 data->ocd_grant_tax_kb = ddp->ddp_extent_tax >> 10;
7162 data->ocd_grant_max_blks = ddp->ddp_max_extent_blks;
7163 }
7165 /* Save connect_data we have so far because tgt_grant_connect()
7166 * uses it to calculate grant, and we want to save the client
7167 * version before it is overwritten by LUSTRE_VERSION_CODE.
7168 */
7169 exp->exp_connect_data = *data;
7170 if (OCD_HAS_FLAG(data, GRANT))
7171 tgt_grant_connect(env, exp, data, !reconnect);
7173 if (OCD_HAS_FLAG(data, MAXBYTES))
7174 data->ocd_maxbytes = mdt->mdt_lut.lut_dt_conf.ddp_maxbytes;
7176 /* NB: Disregard the rule against updating
7177 * exp_connect_data.ocd_connect_flags in this case, since
7178 * tgt_client_new() needs to know if this is a lightweight
7179 * connection, and it is safe to expose this flag before
7180 * connection processing completes.
7181 */
7182 if (data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT) {
7183 spin_lock(&exp->exp_lock);
7184 *exp_connect_flags_ptr(exp) |= OBD_CONNECT_LIGHTWEIGHT;
7185 spin_unlock(&exp->exp_lock);
7186 }
7188 data->ocd_version = LUSTRE_VERSION_CODE;
7190 if ((data->ocd_connect_flags & OBD_CONNECT_FID) == 0) {
7191 CWARN("%s: MDS requires FID support, but client does not\n",
7192 mdt_obd_name(mdt));
7193 RETURN(-EBADE);
7194 }
7196 if (OCD_HAS_FLAG(data, PINGLESS) && !ptlrpc_pinger_suppress_pings())
7197 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
7199 /* Because we do not want this export to be evicted by pinger,
7200 * let's not add this export to the timed chain list. */
7201 if (!OCD_HAS_FLAG(data, PINGLESS) &&
7202 !(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
7203 spin_lock(&exp->exp_lock);
7204 exp->exp_timed = 1;
7205 spin_unlock(&exp->exp_lock);
7206 }
7208 data->ocd_max_easize = mdt->mdt_max_ea_size;
7210 /* NB: Disregard the rule against updating
7211 * exp_connect_data.ocd_connect_flags in this case, since
7212 * tgt_client_new() needs to know whether this client supports
7213 * multiple modify RPCs, and it is safe to expose this flag before
7214 * connection processing completes.
7215 */
7216 if (data->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) {
7217 if (mdt_max_mod_rpcs_changed(mdt))
7218 /* The new mdt.*.max_mod_rpcs_in_flight parameter
7219 * has not changed since initialization, but the
7220 * deprecated module parameter was changed,
7221 * so use that instead.
7222 */
7223 data->ocd_maxmodrpcs = max_mod_rpcs_per_client;
7224 else
7225 data->ocd_maxmodrpcs = mdt->mdt_max_mod_rpcs_in_flight;
7226 spin_lock(&exp->exp_lock);
7227 *exp_connect_flags_ptr(exp) |= OBD_CONNECT_MULTIMODRPCS;
7228 spin_unlock(&exp->exp_lock);
7229 }
7231 if (OCD_HAS_FLAG(data, CKSUM)) {
7232 __u32 cksum_types = data->ocd_cksum_types;
7234 tgt_mask_cksum_types(&mdt->mdt_lut, &data->ocd_cksum_types);
7236 if (unlikely(data->ocd_cksum_types == 0)) {
7237 CERROR("%s: Connect with checksum support but no ocd_cksum_types is set\n",
7238 exp->exp_obd->obd_name);
7239 RETURN(-EPROTO);
7240 }
7242 CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return %x\n",
7243 exp->exp_obd->obd_name, obd_export_nid2str(exp),
7244 cksum_types, data->ocd_cksum_types);
7245 } else {
7246 /* Client does not support OBD_CONNECT_CKSUM; fall back to CRC32 */
7247 CDEBUG(D_RPCTRACE, "%s: cli %s does not support OBD_CONNECT_CKSUM, CRC32 will be used\n",
7248 exp->exp_obd->obd_name, obd_export_nid2str(exp));
7249 }
7251 if ((data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) &&
7252 !(data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT)) {
7253 atomic_inc(&mdt->mdt_mds_mds_conns);
7254 mdt_enable_slc(mdt);
7255 }
7257 if (!mdt->mdt_lut.lut_dt_conf.ddp_has_lseek_data_hole)
7258 data->ocd_connect_flags2 &= ~OBD_CONNECT2_LSEEK;
7260 if (!OCD_HAS_FLAG(data, MDS_MDS) && !OCD_HAS_FLAG(data, LIGHTWEIGHT) &&
7261 !OCD_HAS_FLAG2(data, DMV_IMP_INHERIT)) {
7262 atomic_inc(&mdt->mdt_dmv_old_client_count);
7263 mdt->mdt_enable_dmv_implicit_inherit = 0;
7264 }
7266 RETURN(0);
7267 }
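/*
 * Worked example of the negotiation above (illustrative values only):
 * a client connects with ocd_connect_flags = OBD_CONNECT_ACL |
 * OBD_CONNECT_XATTR | OBD_CONNECT_BRW_SIZE and ocd_brw_size = 4 MiB.
 * If the target was mounted with ACLs disabled, OBD_CONNECT_ACL is
 * cleared; ocd_brw_size is clamped to mdt_brw_size (1 MiB by default);
 * and the reply carries the reduced flag set back to the client, which
 * must then operate with the intersection of features.
 */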
7269 static int mdt_ctxt_add_dirty_flag(struct lu_env *env,
7270 struct mdt_thread_info *info,
7271 struct mdt_file_data *mfd)
7272 {
7273 struct lu_context ses;
7274 int rc;
7276 ENTRY;
7278 rc = lu_context_init(&ses, LCT_SERVER_SESSION);
7279 if (rc)
7280 RETURN(rc);
7282 env->le_ses = &ses;
7283 lu_context_enter(&ses);
7285 mdt_ucred(info)->uc_valid = UCRED_OLD;
7286 /* do not let rbac interfere with dirty flag internal system event */
7287 mdt_ucred(info)->uc_rbac_file_perms = 1;
7288 mdt_ucred(info)->uc_rbac_dne_ops = 1;
7289 mdt_ucred(info)->uc_rbac_quota_ops = 1;
7290 mdt_ucred(info)->uc_rbac_byfid_ops = 1;
7291 mdt_ucred(info)->uc_rbac_chlg_ops = 1;
7292 mdt_ucred(info)->uc_rbac_fscrypt_admin = 1;
7293 mdt_ucred(info)->uc_rbac_server_upcall = 1;
7294 mdt_ucred(info)->uc_rbac_ignore_root_prjquota = 1;
7295 mdt_ucred(info)->uc_rbac_hsm_ops = 1;
7296 mdt_ucred(info)->uc_rbac_local_admin = 1;
7297 rc = mdt_add_dirty_flag(info, mfd->mfd_object, &info->mti_attr);
7299 lu_context_exit(&ses);
7300 lu_context_fini(&ses);
7301 env->le_ses = NULL;
7303 RETURN(rc);
7304 }
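/*
 * The temporary session context above exists because this path runs from
 * export cleanup, outside any client RPC, so no server session (and thus
 * no lu_ucred) is set up; mdt_add_dirty_flag() needs credentials, and the
 * uc_rbac_* overrides keep role-based access checks from rejecting what
 * is purely an internal system event (as the comment at the top notes).
 */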
7306 static int mdt_export_cleanup(struct obd_export *exp)
7307 {
7308 LIST_HEAD(closing_list);
7309 struct mdt_export_data *med = &exp->exp_mdt_data;
7310 struct obd_device *obd = exp->exp_obd;
7311 struct mdt_device *mdt;
7312 struct mdt_thread_info *info;
7313 struct lu_env *env;
7314 struct mdt_file_data *mfd, *n;
7315 int rc = 0;
7317 ENTRY;
7319 spin_lock(&med->med_open_lock);
7320 while (!list_empty(&med->med_open_head)) {
7321 struct list_head *tmp = med->med_open_head.next;
7323 mfd = list_entry(tmp, struct mdt_file_data, mfd_list);
7325 /* Remove mfd handle so it can't be found again.
7326 * We are consuming the mfd_list reference here.
7327 */
7328 class_handle_unhash(&mfd->mfd_open_handle);
7329 list_move_tail(&mfd->mfd_list, &closing_list);
7330 }
7331 spin_unlock(&med->med_open_lock);
7332 mdt = mdt_dev(obd->obd_lu_dev);
7333 LASSERT(mdt != NULL);
7335 env = lu_env_find();
7336 LASSERT(env);
7338 info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
7339 LASSERT(info != NULL);
7340 info->mti_env = env;
7341 info->mti_mdt = mdt;
7342 info->mti_exp = exp;
7343 info->mti_pill = NULL;
7345 if (!list_empty(&closing_list)) {
7346 struct md_attr *ma = &info->mti_attr;
7348 /* Close any open files (which may cause orphan unlinking). */
7349 list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
7350 list_del_init(&mfd->mfd_list);
7351 ma->ma_need = ma->ma_valid = 0;
7353 /* This file is being closed due to an eviction; it
7354 * could have been modified and now be dirty with respect
7355 * to the HSM archive, so check this!
7356 * The logic here is to mark a file dirty if there's a
7357 * chance it was dirtied before the client was evicted,
7358 * so that we don't have to wait for a release attempt
7359 * before finding out the file was actually dirty and
7360 * fail the release. Aggressively marking it dirty here
7361 * will cause the policy engine to attempt to
7362 * re-archive it; when rearchiving, we can compare the
7363 * current version to the HSM data_version and make the
7364 * archive request into a noop if it's not actually
7365 * dirty.
7366 */
7367 if (mfd->mfd_open_flags & MDS_FMODE_WRITE)
7368 rc = mdt_ctxt_add_dirty_flag(env, info, mfd);
7370 /* Don't unlink orphan on failover umount, LU-184 */
7371 if (exp->exp_flags & OBD_OPT_FAILOVER ||
7372 exp->exp_obd->obd_stopping) {
7373 ma->ma_valid = MA_FLAGS;
7374 ma->ma_attr_flags |= MDS_KEEP_ORPHAN;
7375 }
7376 ma->ma_valid |= MA_FORCE_LOG;
7377 mdt_mfd_close(info, mfd);
7378 }
7379 }
7380 info->mti_mdt = NULL;
7381 /* cleanup client slot early */
7382 /* Do not erase record for recoverable client. */
7383 if (!(exp->exp_flags & OBD_OPT_FAILOVER) || exp->exp_failed)
7384 tgt_client_del(env, exp);
7386 RETURN(rc);
7387 }
7389 static int mdt_obd_disconnect(struct obd_export *exp)
7390 {
7391 struct obd_connect_data *data = &exp->exp_connect_data;
7392 struct mdt_device *mdt = mdt_dev(exp->exp_obd->obd_lu_dev);
7393 int rc;
7395 ENTRY;
7397 LASSERT(exp);
7398 class_export_get(exp);
7400 if (OCD_HAS_FLAG(data, MDS_MDS) && !OCD_HAS_FLAG(data, LIGHTWEIGHT) &&
7401 atomic_dec_and_test(&mdt->mdt_mds_mds_conns))
7402 mdt_disable_slc(mdt);
7404 if (!OCD_HAS_FLAG(data, MDS_MDS) && !OCD_HAS_FLAG(data, LIGHTWEIGHT) &&
7405 !OCD_HAS_FLAG2(data, DMV_IMP_INHERIT) &&
7406 atomic_dec_and_test(&mdt->mdt_dmv_old_client_count))
7407 mdt->mdt_enable_dmv_implicit_inherit = 1;
7409 rc = server_disconnect_export(exp);
7410 if (rc != 0)
7411 CDEBUG(D_IOCTL, "server disconnect error: rc = %d\n", rc);
7413 tgt_grant_discard(exp);
7415 if (!(exp->exp_flags & OBD_OPT_FORCE))
7416 tgt_grant_sanity_check(exp->exp_obd, __func__);
7418 rc = mdt_export_cleanup(exp);
7419 nodemap_del_member(exp);
7420 class_export_put(exp);
7422 RETURN(rc);
7423 }
7424 /* mds_connect copy */
7425 static int mdt_obd_connect(const struct lu_env *env,
7426 struct obd_export **exp, struct obd_device *obd,
7427 struct obd_uuid *cluuid,
7428 struct obd_connect_data *data,
7429 void *localdata)
7430 {
7431 struct obd_export *lexp;
7432 struct lustre_handle conn = { 0 };
7433 struct mdt_device *mdt;
7434 int rc;
7435 struct lnet_nid *client_nid = localdata;
7437 ENTRY;
7439 LASSERT(env != NULL);
7440 LASSERT(data != NULL);
7442 if (!exp || !obd || !cluuid)
7443 RETURN(-EINVAL);
7445 mdt = mdt_dev(obd->obd_lu_dev);
7447 /*
7448 * first, check whether the stack is ready to handle requests
7449 * XXX: the method used now is probably not appropriate;
7450 * at some point we should find a better one
7451 */
7452 if (!test_bit(MDT_FL_SYNCED, &mdt->mdt_state) &&
7453 !(data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT) &&
7454 !(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
7455 rc = obd_get_info(env, mdt->mdt_child_exp,
7456 sizeof(KEY_OSP_CONNECTED),
7457 KEY_OSP_CONNECTED, NULL, NULL);
7458 if (rc)
7459 RETURN(-EAGAIN);
7460 set_bit(MDT_FL_SYNCED, &mdt->mdt_state);
7461 }
7463 rc = class_connect(&conn, obd, cluuid);
7464 if (rc)
7465 RETURN(rc);
7467 lexp = class_conn2export(&conn);
7468 LASSERT(lexp != NULL);
7470 rc = nodemap_add_member(client_nid, lexp);
7471 if (rc != 0 && rc != -EEXIST)
7472 GOTO(out, rc);
7474 rc = mdt_connect_internal(env, lexp, mdt, data, false);
7475 if (rc == 0) {
7476 struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd;
7478 LASSERT(lcd);
7479 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
7480 rc = tgt_client_new(env, lexp);
7481 if (rc == 0)
7482 mdt_export_stats_init(obd, lexp, localdata);
7483 }
7484 out:
7485 if (rc != 0) {
7486 class_disconnect(lexp);
7487 nodemap_del_member(lexp);
7488 *exp = NULL;
7489 } else {
7490 *exp = lexp;
7491 }
7493 RETURN(rc);
7494 }
7496 static int mdt_obd_reconnect(const struct lu_env *env,
7497 struct obd_export *exp, struct obd_device *obd,
7498 struct obd_uuid *cluuid,
7499 struct obd_connect_data *data,
7500 void *localdata)
7501 {
7502 struct lnet_nid *client_nid = localdata;
7503 int rc;
7505 ENTRY;
7507 if (exp == NULL || obd == NULL || cluuid == NULL)
7508 RETURN(-EINVAL);
7510 rc = nodemap_add_member(client_nid, exp);
7511 if (rc != 0 && rc != -EEXIST)
7512 RETURN(rc);
7514 rc = mdt_connect_internal(env, exp, mdt_dev(obd->obd_lu_dev), data,
7515 true);
7516 if (rc == 0)
7517 mdt_export_stats_init(obd, exp, localdata);
7518 else
7519 nodemap_del_member(exp);
7521 RETURN(rc);
7522 }
7524 /* FIXME: Can we avoid using these two interfaces? */
7525 static int mdt_init_export(struct obd_export *exp)
7526 {
7527 struct mdt_export_data *med = &exp->exp_mdt_data;
7528 int rc;
7530 ENTRY;
7532 INIT_LIST_HEAD(&med->med_open_head);
7533 spin_lock_init(&med->med_open_lock);
7534 spin_lock(&exp->exp_lock);
7535 exp->exp_connecting = 1;
7536 spin_unlock(&exp->exp_lock);
7538 OBD_ALLOC(exp->exp_used_slots,
7539 BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long));
7540 if (exp->exp_used_slots == NULL)
7541 RETURN(-ENOMEM);
7543 /* self-export doesn't need client data and ldlm initialization */
7544 if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
7545 &exp->exp_client_uuid)))
7546 RETURN(0);
7548 rc = tgt_client_alloc(exp);
7549 if (rc)
7550 GOTO(err, rc);
7552 rc = ldlm_init_export(exp);
7553 if (rc)
7554 GOTO(err_free, rc);
7556 RETURN(rc);
7558 err_free:
7559 tgt_client_free(exp);
7560 err:
7561 OBD_FREE(exp->exp_used_slots,
7562 BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long));
7563 exp->exp_used_slots = NULL;
7565 CERROR("%s: Failed to initialize export: rc = %d\n",
7566 exp->exp_obd->obd_name, rc);
7567 return rc;
7568 }
7570 static int mdt_destroy_export(struct obd_export *exp)
7571 {
7572 ENTRY;
7574 target_destroy_export(exp);
7575 OBD_FREE(exp->exp_used_slots,
7576 BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long));
7578 /* destroy can be called from failed obd_setup, so
7579 * checking uuid is safer than obd_self_export
7580 */
7581 if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
7582 &exp->exp_client_uuid)))
7583 RETURN(0);
7585 ldlm_destroy_export(exp);
7586 tgt_client_free(exp);
7588 LASSERT(list_empty(&exp->exp_outstanding_replies));
7589 LASSERT(list_empty(&exp->exp_mdt_data.med_open_head));
7591 /*
7592 * discard grants once we're sure no more
7593 * interaction with the client is possible
7594 */
7595 tgt_grant_discard(exp);
7596 if (exp_connect_flags(exp) & OBD_CONNECT_GRANT)
7597 obd2obt(exp->exp_obd)->obt_lut->lut_tgd.tgd_tot_granted_clients--;
7599 if (!(exp->exp_flags & OBD_OPT_FORCE))
7600 tgt_grant_sanity_check(exp->exp_obd, __func__);
7602 RETURN(0);
7603 }
7605 int mdt_links_read(struct mdt_thread_info *info, struct mdt_object *mdt_obj,
7606 struct linkea_data *ldata)
7607 {
7608 int rc;
7610 LASSERT(ldata->ld_buf->lb_buf != NULL);
7612 if (!mdt_object_exists(mdt_obj))
7613 return -ENODATA;
7615 rc = mo_xattr_get(info->mti_env, mdt_object_child(mdt_obj),
7616 ldata->ld_buf, XATTR_NAME_LINK);
7617 if (rc == -ERANGE) {
7618 /* Buf was too small, figure out what we need. */
7619 lu_buf_free(ldata->ld_buf);
7620 rc = mo_xattr_get(info->mti_env, mdt_object_child(mdt_obj),
7621 ldata->ld_buf, XATTR_NAME_LINK);
7622 if (rc < 0)
7623 return rc;
7624 ldata->ld_buf = lu_buf_check_and_alloc(ldata->ld_buf, rc);
7625 if (ldata->ld_buf->lb_buf == NULL)
7626 return -ENOMEM;
7627 rc = mo_xattr_get(info->mti_env, mdt_object_child(mdt_obj),
7628 ldata->ld_buf, XATTR_NAME_LINK);
7629 }
7630 if (rc < 0)
7631 return rc;
7633 return linkea_init_with_rec(ldata);
7634 }
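/*
 * The -ERANGE dance above is the usual Lustre xattr sizing pattern:
 * fetch into the existing buffer; if the link EA has grown, free the
 * buffer and re-issue the call with a zero-length buffer so mo_xattr_get()
 * returns the required size, reallocate, and fetch again. The same
 * pattern appears wherever variably-sized EAs (LOV, LMV, linkEA) are read.
 */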
7636 /**
7637 * Given an MDT object, try to look up the full path to the object.
7638 * Part of the MDT layer implementation of lfs fid2path.
7640 * \param[in] info Per-thread common data shared by MDT level handlers.
7641 * \param[in] obj Object to do path lookup of
7642 * \param[in,out] fp User-provided struct to store path information
7643 * \param[in] root_fid Root FID of current path should reach
7645 * \retval 0 Lookup successful, path information stored in fp
7646 * \retval -EAGAIN Lookup failed, usually because object is being moved
7647 * \retval negative errno if there was a problem
7648 */
7649 static int mdt_path_current(struct mdt_thread_info *info,
7650 struct mdt_object *obj,
7651 struct getinfo_fid2path *fp,
7652 struct lu_fid *root_fid)
7653 {
7654 struct mdt_device *mdt = info->mti_mdt;
7655 struct lu_name *tmpname = &info->mti_name;
7656 struct lu_fid *tmpfid = &info->mti_tmp_fid1;
7657 struct lu_buf *buf = &info->mti_big_buf;
7658 struct linkea_data ldata = { NULL };
7659 bool first = true;
7660 struct mdt_object *mdt_obj;
7661 struct link_ea_header *leh;
7662 struct link_ea_entry *lee;
7663 bool worthchecking = true;
7664 bool needsfid = false;
7665 bool supported = false;
7666 int reclen;
7667 char *ptr;
7668 int rc = 0;
7670 ENTRY;
7673 /* temp buffer for path element, the buffer will be finally freed
7674 * in mdt_thread_info_fini
7675 */
7676 buf = lu_buf_check_and_alloc(buf, PATH_MAX);
7677 if (buf->lb_buf == NULL)
7678 RETURN(-ENOMEM);
7680 ldata.ld_buf = buf;
7681 ptr = fp->gf_u.gf_path + fp->gf_pathlen - 1;
7682 *ptr = 0;
7683 --ptr;
7684 *tmpfid = fp->gf_fid = *mdt_object_fid(obj);
7686 while (!lu_fid_eq(root_fid, &fp->gf_fid)) {
7687 if (!lu_fid_eq(root_fid, &mdt->mdt_md_root_fid) &&
7688 lu_fid_eq(&mdt->mdt_md_root_fid, &fp->gf_fid))
7689 GOTO(out, rc = -ENOENT);
7691 if (lu_fid_eq(mdt_object_fid(obj), tmpfid)) {
7692 mdt_obj = obj;
7693 mdt_object_get(info->mti_env, mdt_obj);
7694 } else {
7695 mdt_obj = mdt_object_find(info->mti_env, mdt, tmpfid);
7696 if (IS_ERR(mdt_obj))
7697 GOTO(out, rc = PTR_ERR(mdt_obj));
7698 }
7700 if (!mdt_object_exists(mdt_obj)) {
7701 mdt_object_put(info->mti_env, mdt_obj);
7702 GOTO(out, rc = -ENOENT);
7703 }
7705 if (mdt_object_remote(mdt_obj)) {
7706 mdt_object_put(info->mti_env, mdt_obj);
7707 GOTO(remote_out, rc = -EREMOTE);
7708 }
7710 if (worthchecking) {
7711 /* need to know if FID being looked up is encrypted */
7712 struct lu_attr la = { 0 };
7713 struct dt_object *dt = mdt_obj2dt(mdt_obj);
7715 if (dt && dt->do_ops && dt->do_ops->do_attr_get)
7716 dt_attr_get(info->mti_env, dt, &la);
7717 if (la.la_valid & LA_FLAGS &&
7718 la.la_flags & LUSTRE_ENCRYPT_FL) {
7719 if (!supported && mdt_info_req(info) &&
7720 !exp_connect_encrypt_fid2path(
7721 mdt_info_req(info)->rq_export)) {
7722 /* client does not support fid2path
7723 * for encrypted files
7724 */
7725 mdt_object_put(info->mti_env, mdt_obj);
7726 GOTO(out, rc = -ENODATA);
7727 } else {
7728 supported = true;
7729 }
7730 needsfid = true;
7731 } else {
7732 /* no need to check any more */
7734 worthchecking = false;
7735 }
7736 }
7741 rc = mdt_links_read(info, mdt_obj, &ldata);
7742 if (rc != 0) {
7743 mdt_object_put(info->mti_env, mdt_obj);
7744 GOTO(out, rc);
7745 }
7747 leh = buf->lb_buf;
7748 lee = (struct link_ea_entry *)(leh + 1); /* link #0 */
7749 linkea_entry_unpack(lee, &reclen, tmpname, tmpfid);
7750 /* If set, use link #linkno for path lookup, otherwise use
7751 * link #0. Only do this for the final path element.
7752 */
7753 if (first && fp->gf_linkno < leh->leh_reccount) {
7754 int count;
7756 for (count = 0; count < fp->gf_linkno; count++) {
7757 lee = (struct link_ea_entry *)
7758 ((char *)lee + reclen);
7759 linkea_entry_unpack(lee, &reclen, tmpname,
7760 tmpfid);
7761 }
7762 if (fp->gf_linkno < leh->leh_reccount - 1)
7763 /* indicate to user there are more links */
7764 fp->gf_linkno++;
7765 }
7767 /* Check if it is slave stripes */
7768 rc = mdt_is_dir_stripe(info, mdt_obj);
7769 mdt_object_put(info->mti_env, mdt_obj);
7770 if (rc < 0)
7771 GOTO(out, rc);
7772 if (rc == 1) {
7773 fp->gf_fid = *tmpfid;
7774 continue;
7775 }
7777 /* Pack the name in the end of the buffer */
7778 ptr -= tmpname->ln_namelen;
7779 if (ptr - 1 <= fp->gf_u.gf_path)
7780 GOTO(out, rc = -ENAMETOOLONG);
7781 strncpy(ptr, tmpname->ln_name, tmpname->ln_namelen);
7782 if (needsfid) {
7783 /* Pack FID before file name, so that client can build
7784 * encoded/digested form.
7785 */
7786 char fidstr[FID_LEN + 1];
7788 snprintf(fidstr, sizeof(fidstr), DFID,
7789 PFID(tmpfid));
7790 ptr -= strlen(fidstr);
7791 if (ptr - 1 <= fp->gf_u.gf_path)
7792 GOTO(out, rc = -ENAMETOOLONG);
7793 strncpy(ptr, fidstr, strlen(fidstr));
7794 }
7795 *(--ptr) = '/';
7796 first = false;
7797 /* keep the last resolved fid to the client, so the client will
7798 * build the left path on another MDT for remote object
7799 */
7800 fp->gf_fid = *tmpfid;
7801 }
7804 remote_out:
7805 /* non-zero will be treated as an error */
7810 ptr++; /* skip leading / unless this is an encrypted file */
7811 memmove(fp->gf_u.gf_path, ptr,
7812 fp->gf_u.gf_path + fp->gf_pathlen - ptr);
7814 out:
7815 RETURN(rc);
7816 }
7818 /**
7819 * Given an MDT object, use mdt_path_current to get the path.
7820 * Essentially a wrapper to retry mdt_path_current a set number of times
7821 * if -EAGAIN is returned (usually because an object is being moved).
7823 * Part of the MDT layer implementation of lfs fid2path.
7825 * \param[in] info Per-thread common data shared by mdt level handlers.
7826 * \param[in] obj Object to do path lookup of
7827 * \param[in,out] fp User-provided struct for arguments and to store path
7830 * \retval 0 Lookup successful, path information stored in fp
7831 * \retval negative errno if there was a problem
7832 */
7833 static int mdt_path(struct mdt_thread_info *info, struct mdt_object *obj,
7834 struct getinfo_fid2path *fp, struct lu_fid *root_fid)
7835 {
7836 struct mdt_device *mdt = info->mti_mdt;
7837 int tries = 3;
7838 int rc = -EAGAIN;
7840 ENTRY;
7842 if (fp->gf_pathlen < 3)
7843 RETURN(-EOVERFLOW);
7845 if (root_fid == NULL)
7846 root_fid = &mdt->mdt_md_root_fid;
7848 if (lu_fid_eq(root_fid, mdt_object_fid(obj))) {
7849 fp->gf_u.gf_path[0] = '\0';
7850 RETURN(0);
7851 }
7853 /* Retry multiple times in case file is being moved */
7854 while (tries-- && rc == -EAGAIN)
7855 rc = mdt_path_current(info, obj, fp, root_fid);
7857 RETURN(rc);
7858 }
7860 /**
7861 * Get the full path of the provided FID, as of changelog record recno.
7863 * This checks sanity and looks up object for user provided FID
7864 * before calling the actual path lookup code.
7866 * Part of the MDT layer implementation of lfs fid2path.
7868 * \param[in] info Per-thread common data shared by mdt level handlers.
7869 * \param[in,out] fp User-provided struct for arguments and to store path
7872 * \retval 0 Lookup successful, path information and recno stored in fp
7873 * \retval -ENOENT, object does not exist
7874 * \retval negative errno if there was a problem
7875 */
7876 static int mdt_fid2path(struct mdt_thread_info *info,
7877 struct lu_fid *root_fid,
7878 struct getinfo_fid2path *fp)
7879 {
7880 struct mdt_device *mdt = info->mti_mdt;
7881 struct mdt_object *obj;
7882 int rc;
7883 int excess;
7885 ENTRY;
7887 CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
7888 PFID(&fp->gf_fid), fp->gf_recno, fp->gf_linkno);
7890 if (!fid_is_sane(&fp->gf_fid))
7891 RETURN(-EINVAL);
7893 if (!fid_is_namespace_visible(&fp->gf_fid)) {
7894 CDEBUG(D_INFO, "%s: "DFID" is invalid, f_seq should be >= %#llx, or f_oid != 0, or f_ver == 0\n",
7895 mdt_obd_name(mdt),
7896 PFID(&fp->gf_fid), (__u64)FID_SEQ_NORMAL);
7897 RETURN(-EINVAL);
7898 }
7900 /* return error if client-provided root fid is not the one stored in
7901 * the export
7902 */
7903 if (root_fid && !fid_is_zero(&info->mti_exp->exp_root_fid) &&
7904 !lu_fid_eq(root_fid, &info->mti_exp->exp_root_fid)) {
7905 CDEBUG(D_INFO,
7906 "%s: root fid from client "DFID" but "DFID" stored in export\n",
7907 mdt_obd_name(mdt), PFID(root_fid),
7908 PFID(&info->mti_exp->exp_root_fid));
7909 RETURN(-EXDEV);
7910 }
7912 obj = mdt_object_find(info->mti_env, mdt, &fp->gf_fid);
7913 if (IS_ERR(obj)) {
7914 rc = PTR_ERR(obj);
7915 CDEBUG(D_IOCTL, "cannot find "DFID": rc = %d\n",
7916 PFID(&fp->gf_fid), rc);
7917 RETURN(rc);
7918 }
7920 if (mdt_object_remote(obj))
7921 rc = -EREMOTE;
7922 else if (!mdt_object_exists(obj))
7923 rc = -ENOENT;
7924 else
7925 rc = 0;
7927 if (rc < 0) {
7928 mdt_object_put(info->mti_env, obj);
7929 CDEBUG(D_IOCTL, "nonlocal object "DFID": rc = %d\n",
7930 PFID(&fp->gf_fid), rc);
7931 RETURN(rc);
7932 }
7934 rc = mdt_path(info, obj, fp, root_fid);
7936 excess = fp->gf_pathlen > 3072 ? fp->gf_pathlen - 3072 : 0;
7937 CDEBUG(D_INFO, "fid "DFID", path %.*s recno %#llx linkno %u\n",
7938 PFID(&fp->gf_fid), fp->gf_pathlen - excess,
7939 fp->gf_u.gf_path + excess, fp->gf_recno, fp->gf_linkno);
7941 mdt_object_put(info->mti_env, obj);
7943 RETURN(rc);
7944 }
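/*
 * The user-visible entry point for this machinery is lfs fid2path; for
 * example (illustrative FID):
 *
 *   lfs fid2path /mnt/lustre [0x200000402:0x1:0x0]
 *
 * A FID with several hard links can be enumerated by passing increasing
 * --link values, which is what the gf_linkno handling above serves.
 */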
7946 static int mdt_rpc_fid2path(struct mdt_thread_info *info, void *key, int keylen,
7947 void *val, int vallen)
7948 {
7949 struct getinfo_fid2path *fpout, *fpin;
7950 struct lu_fid *root_fid = NULL;
7951 int rc = 0;
7953 fpin = key + round_up(sizeof(KEY_FID2PATH), 8);
7954 fpout = val;
7956 if (req_capsule_req_need_swab(info->mti_pill))
7957 lustre_swab_fid2path(fpin);
7959 memcpy(fpout, fpin, sizeof(*fpin));
7960 if (fpout->gf_pathlen != vallen - sizeof(*fpin))
7961 RETURN(-EINVAL);
7963 if (keylen >= round_up(sizeof(KEY_FID2PATH), 8) + sizeof(*fpin) +
7964 sizeof(struct lu_fid)) {
7965 /* client sent its root FID, which is normally fileset FID */
7966 root_fid = &fpin->gf_u.gf_root_fid;
7967 if (req_capsule_req_need_swab(info->mti_pill))
7968 lustre_swab_lu_fid(root_fid);
7969 }
7970 if (root_fid != NULL && !fid_is_sane(root_fid))
7971 RETURN(-EINVAL);
7974 rc = mdt_fid2path(info, root_fid, fpout);
7975 RETURN(rc);
7976 }
7978 int mdt_get_info(struct tgt_session_info *tsi)
7979 {
7980 char *key;
7981 int keylen;
7982 __u32 *vallen;
7983 void *valout;
7984 int rc;
7986 key = req_capsule_client_get(tsi->tsi_pill, &RMF_GETINFO_KEY);
7987 if (key == NULL) {
7988 DEBUG_REQ(D_IOCTL, tgt_ses_req(tsi), "no GETINFO key");
7989 RETURN(err_serious(-EPROTO));
7990 }
7991 keylen = req_capsule_get_size(tsi->tsi_pill, &RMF_GETINFO_KEY,
7992 RCL_CLIENT);
7993 if (KEY_IS(KEY_FID2PATH)) {
7994 struct mdt_thread_info *info;
7998 req_capsule_extend(tsi->tsi_pill, &RQF_MDS_FID2PATH);
7999 vallen = req_capsule_client_get(tsi->tsi_pill,
8000 &RMF_GETINFO_VALLEN);
8001 if (vallen == NULL) {
8002 DEBUG_REQ(D_IOCTL, tgt_ses_req(tsi),
8003 "%s: cannot get RMF_GETINFO_VALLEN buffer\n",
8004 tgt_name(tsi->tsi_tgt));
8005 RETURN(err_serious(-EPROTO));
8006 }
8008 req_capsule_set_size(tsi->tsi_pill, &RMF_GETINFO_VAL,
8009 RCL_SERVER, *vallen);
8010 rc = req_capsule_server_pack(tsi->tsi_pill);
8011 if (rc)
8012 RETURN(err_serious(rc));
8014 valout = req_capsule_server_get(tsi->tsi_pill,
8015 &RMF_GETINFO_VAL);
8016 if (valout == NULL) {
8017 DEBUG_REQ(D_IOCTL, tgt_ses_req(tsi),
8018 "%s: cannot get get-info RPC out buffer\n",
8019 tgt_name(tsi->tsi_tgt));
8020 RETURN(err_serious(-EPROTO));
8021 }
8022 info = tsi2mdt_info(tsi);
8023 rc = mdt_rpc_fid2path(info, key, keylen, valout, *vallen);
8024 mdt_thread_info_fini(info);
8025 } else if (KEY_IS(KEY_FIEMAP)) {
8026 rc = mdt_fiemap_get(tsi);
8027 } else {
8028 rc = err_serious(-EOPNOTSUPP);
8029 }
8031 RETURN(rc);
8032 }
8033 static int mdt_ioc_version_get(struct mdt_thread_info *mti, void *karg)
8034 {
8035 struct obd_ioctl_data *data = karg;
8036 struct lu_fid *fid;
8037 __u64 version;
8038 struct mdt_object *obj;
8039 struct mdt_lock_handle *lh;
8040 int rc;
8042 ENTRY;
8044 if (data->ioc_inlbuf1 == NULL || data->ioc_inllen1 != sizeof(*fid) ||
8045 data->ioc_inlbuf2 == NULL || data->ioc_inllen2 != sizeof(version))
8046 RETURN(-EINVAL);
8048 fid = (struct lu_fid *)data->ioc_inlbuf1;
8050 if (!fid_is_sane(fid))
8051 RETURN(-EINVAL);
8053 CDEBUG(D_IOCTL, "getting version for "DFID"\n", PFID(fid));
8055 lh = &mti->mti_lh[MDT_LH_PARENT];
8056 obj = mdt_object_find_lock(mti, fid, lh, MDS_INODELOCK_UPDATE, LCK_CR);
8057 if (IS_ERR(obj))
8058 RETURN(PTR_ERR(obj));
8060 if (mdt_object_remote(obj)) {
8061 rc = -EREMOTE;
8062 /*
8063 * the client should resolve the correct MDS by FID before
8064 * calling version get; finding a remote object here is an error
8065 */
8066 CERROR("nonlocal object "DFID"\n", PFID(fid));
8067 } else if (!mdt_object_exists(obj)) {
8068 *(__u64 *)data->ioc_inlbuf2 = ENOENT_VERSION;
8069 rc = -ESTALE;
8070 } else {
8071 version = dt_version_get(mti->mti_env, mdt_obj2dt(obj));
8072 *(__u64 *)data->ioc_inlbuf2 = version;
8073 rc = 0;
8074 }
8075 mdt_object_unlock_put(mti, obj, lh, 1);
8076 RETURN(rc);
8077 }
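/*
 * Object versions returned here back version-based recovery (VBR): each
 * metadata update records the pre-operation version, so replayed requests
 * can be checked against the current on-disk version; ENOENT_VERSION is
 * the sentinel reported for objects that no longer exist.
 */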
8079 /* ioctls on obd dev */
8080 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
8081 void *karg, void __user *uarg)
8082 {
8083 struct obd_device *obd = exp->exp_obd;
8084 struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
8085 struct dt_device *dt = mdt->mdt_bottom;
8086 struct obd_ioctl_data *data;
8087 struct lu_env env;
8088 int rc = 0;
8090 ENTRY;
8091 CDEBUG(D_IOCTL, "%s: cmd=%x len=%u karg=%pK uarg=%pK\n",
8092 obd->obd_name, cmd, len, karg, uarg);
8094 rc = lu_env_init(&env, LCT_MD_THREAD);
8095 if (rc)
8096 RETURN(rc);
8098 /* handle commands that don't use @karg first */
8099 switch (cmd) {
8100 case OBD_IOC_SYNC:
8101 rc = mdt_device_sync(&env, mdt);
8102 GOTO(out, rc);
8103 case OBD_IOC_SET_READONLY:
8104 rc = dt_sync(&env, dt);
8105 if (rc == 0)
8106 rc = dt_ro(&env, dt);
8107 GOTO(out, rc);
8108 }
8110 if (unlikely(karg == NULL)) {
8111 OBD_IOC_ERROR(obd->obd_name, cmd, "karg=NULL", rc = -EINVAL);
8112 GOTO(out, rc);
8113 }
8114 data = karg;
8116 switch (cmd) {
8117 case OBD_IOC_ABORT_RECOVERY: {
8118 if (data->ioc_type & OBD_FLG_ABORT_RECOV_MDT) {
8119 LCONSOLE_WARN("%s: Aborting MDT recovery\n",
8121 set_bit(OBDF_ABORT_MDT_RECOVERY, obd->obd_flags);
8122 wake_up(&obd->obd_next_transno_waitq);
8123 } else { /* if (data->ioc_type & OBD_FLG_ABORT_RECOV_OST) */
8124 /* lctl didn't set OBD_FLG_ABORT_RECOV_OST < 2.13.57 */
8125 LCONSOLE_WARN("%s: Aborting client recovery\n",
8127 set_bit(OBDF_ABORT_RECOVERY, obd->obd_flags);
8128 target_stop_recovery_thread(obd);
8129 }
8130 rc = 0;
8131 break;
8132 }
8133 case OBD_IOC_CHANGELOG_REG:
8134 case OBD_IOC_CHANGELOG_DEREG:
8135 case OBD_IOC_CHANGELOG_CLEAR:
8136 case OBD_IOC_LLOG_PRINT:
8137 case OBD_IOC_LLOG_CANCEL:
8138 rc = mdt->mdt_child->md_ops->mdo_iocontrol(&env, mdt->mdt_child,
8139 cmd, len, karg);
8140 break;
8141 case OBD_IOC_START_LFSCK: {
8142 struct md_device *next = mdt->mdt_child;
8143 struct lfsck_start_param lsp;
8145 lsp.lsp_start = (struct lfsck_start *)(data->ioc_inlbuf1);
8146 lsp.lsp_index_valid = 0;
8147 rc = next->md_ops->mdo_iocontrol(&env, next, cmd, 0, &lsp);
8148 break;
8149 }
8150 case OBD_IOC_STOP_LFSCK: {
8151 struct md_device *next = mdt->mdt_child;
8152 struct lfsck_stop stop;
8154 stop.ls_status = LS_STOPPED;
8155 /* Old lfsck utils may pass NULL @stop. */
8156 if (data->ioc_inlbuf1 == NULL)
8157 stop.ls_flags = 0;
8158 else
8159 stop.ls_flags =
8160 ((struct lfsck_stop *)(data->ioc_inlbuf1))->ls_flags;
8162 rc = next->md_ops->mdo_iocontrol(&env, next, cmd, 0, &stop);
8163 break;
8164 }
8165 case OBD_IOC_QUERY_LFSCK: {
8166 struct md_device *next = mdt->mdt_child;
8168 rc = next->md_ops->mdo_iocontrol(&env, next, cmd, 0,
8169 data->ioc_inlbuf1);
8170 break;
8171 }
8172 case OBD_IOC_GET_OBJ_VERSION: {
8173 struct mdt_thread_info *mti;
8175 mti = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
8176 memset(mti, 0, sizeof(*mti));
8177 mti->mti_env = &env;
8178 mti->mti_mdt = mdt;
8179 mti->mti_exp = exp;
8181 rc = mdt_ioc_version_get(mti, karg);
8182 break;
8183 }
8184 case OBD_IOC_CATLOGLIST: {
8185 struct mdt_thread_info *mti;
8187 mti = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
8188 lu_local_obj_fid(&mti->mti_tmp_fid1, LLOG_CATALOGS_OID);
8189 rc = llog_catalog_list(&env, mdt->mdt_bottom, 0, karg,
8190 &mti->mti_tmp_fid1);
8191 break;
8192 }
8193 default:
8194 rc = OBD_IOC_ERROR(obd->obd_name, cmd, "unrecognized", -ENOTTY);
8195 break;
8196 }
8197 out:
8198 lu_env_fini(&env);
8199 RETURN(rc);
8200 }
8202 static int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt)
8203 {
8204 struct lu_device *ld = md2lu_dev(mdt->mdt_child);
8205 int rc;
8207 ENTRY;
8209 if (!mdt->mdt_skip_lfsck && !mdt->mdt_bottom->dd_rdonly) {
8210 struct lfsck_start_param lsp;
8211 struct lfsck_start start;
8213 lsp.lsp_start = NULL;
8214 lsp.lsp_index_valid = 0;
8216 if (dt2lu_dev(mdt->mdt_bottom)->ld_obd &&
8217 dt2lu_dev(mdt->mdt_bottom)->ld_obd->obd_need_scrub) {
8218 memset(&start, 0, sizeof(start));
8219 start.ls_version = LFSCK_VERSION_V1;
8220 start.ls_active = LFSCK_TYPE_SCRUB;
8221 start.ls_flags = LPF_RESET;
8223 lsp.lsp_start = &start;
8224 }
8226 rc = mdt->mdt_child->md_ops->mdo_iocontrol(env, mdt->mdt_child,
8227 OBD_IOC_START_LFSCK,
8228 0, &lsp);
8229 if (rc != 0 && rc != -EALREADY)
8230 CWARN("%s: auto trigger paused LFSCK failed: rc = %d\n",
8231 mdt_obd_name(mdt), rc);
8232 }
8234 rc = ld->ld_ops->ldo_recovery_complete(env, ld);
8235 RETURN(rc);
8236 }
8238 static int mdt_obd_postrecov(struct obd_device *obd)
8239 {
8240 struct lu_env env;
8241 int rc;
8243 rc = lu_env_init(&env, LCT_MD_THREAD);
8244 if (rc)
8245 RETURN(rc);
8246 rc = mdt_postrecov(&env, mdt_dev(obd->obd_lu_dev));
8247 lu_env_fini(&env);
8248 return rc;
8249 }
8251 static const struct obd_ops mdt_obd_device_ops = {
8252 .o_owner = THIS_MODULE,
8253 .o_set_info_async = mdt_obd_set_info_async,
8254 .o_connect = mdt_obd_connect,
8255 .o_reconnect = mdt_obd_reconnect,
8256 .o_disconnect = mdt_obd_disconnect,
8257 .o_init_export = mdt_init_export,
8258 .o_destroy_export = mdt_destroy_export,
8259 .o_iocontrol = mdt_iocontrol,
8260 .o_postrecov = mdt_obd_postrecov,
8261 /* Data-on-MDT IO methods */
8262 .o_preprw = mdt_obd_preprw,
8263 .o_commitrw = mdt_obd_commitrw,
8264 };
8266 static struct lu_device *mdt_device_fini(const struct lu_env *env,
8267 struct lu_device *d)
8268 {
8269 struct mdt_device *m = mdt_dev(d);
8271 ENTRY;
8273 mdt_fini(env, m);
8274 RETURN(NULL);
8275 }
8277 static struct lu_device *mdt_device_free(const struct lu_env *env,
8278 struct lu_device *d)
8279 {
8280 struct mdt_device *m = mdt_dev(d);
8282 ENTRY;
8284 lu_device_fini(&m->mdt_lu_dev);
8285 OBD_FREE_PTR(m);
8287 RETURN(NULL);
8288 }
8290 static struct lu_device *mdt_device_alloc(const struct lu_env *env,
8291 struct lu_device_type *t,
8292 struct lustre_cfg *cfg)
8293 {
8294 struct lu_device *l;
8295 struct mdt_device *m;
8297 OBD_ALLOC_PTR(m);
8298 if (m != NULL) {
8299 int rc;
8301 l = &m->mdt_lu_dev;
8302 rc = mdt_init0(env, m, t, cfg);
8303 if (rc != 0) {
8304 mdt_device_free(env, l);
8305 l = ERR_PTR(rc);
8306 return l;
8307 }
8308 } else
8309 l = ERR_PTR(-ENOMEM);
8310 return l;
8311 }
8313 /* context key constructor/destructor: mdt_key_init, mdt_key_fini */
8314 LU_KEY_INIT(mdt, struct mdt_thread_info);
8316 static void mdt_key_fini(const struct lu_context *ctx,
8317 struct lu_context_key *key, void *data)
8318 {
8319 struct mdt_thread_info *info = data;
8321 if (info->mti_big_lov) {
8322 OBD_FREE_LARGE(info->mti_big_lov, info->mti_big_lovsize);
8323 info->mti_big_lov = NULL;
8324 info->mti_big_lovsize = 0;
8325 }
8327 if (info->mti_big_lmv) {
8328 OBD_FREE_LARGE(info->mti_big_lmv, info->mti_big_lmvsize);
8329 info->mti_big_lmv = NULL;
8330 info->mti_big_lmvsize = 0;
8331 }
8333 if (info->mti_big_acl) {
8334 OBD_FREE_LARGE(info->mti_big_acl, info->mti_big_aclsize);
8335 info->mti_big_acl = NULL;
8336 info->mti_big_aclsize = 0;
8337 }
8339 OBD_FREE_PTR(info);
8340 }
8342 /* context key: mdt_thread_key */
8343 LU_CONTEXT_KEY_DEFINE(mdt, LCT_MD_THREAD);
8345 struct lu_ucred *mdt_ucred(const struct mdt_thread_info *info)
8346 {
8347 return lu_ucred(info->mti_env);
8348 }
8350 struct lu_ucred *mdt_ucred_check(const struct mdt_thread_info *info)
8351 {
8352 return lu_ucred_check(info->mti_env);
8353 }
8355 /**
8356 * Enable/disable COS (Commit On Sharing).
8358 * Set/Clear the COS flag in mdt options.
8360 * \param mdt mdt device
8361 * \param val 0 disables COS, other values enable COS
8362 */
8363 void mdt_enable_cos(struct mdt_device *mdt, bool val)
8364 {
8365 struct lu_env env;
8366 int rc;
8368 mdt->mdt_opts.mo_cos = val;
8369 rc = lu_env_init(&env, LCT_LOCAL);
8370 if (unlikely(rc != 0)) {
8371 CWARN("%s: lu_env initialization failed, cannot sync: rc = %d\n",
8372 mdt_obd_name(mdt), rc);
8373 return;
8374 }
8375 mdt_device_sync(&env, mdt);
8376 lu_env_fini(&env);
8377 }
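/*
 * Commit-on-sharing forces a transaction commit whenever uncommitted
 * updates from one client become visible to another, trading latency
 * for replay safety after failover; the sync above flushes anything
 * already pending when COS is toggled. At runtime this is expected to
 * be driven through the mdt.*.commit_on_sharing tunable, e.g.:
 *
 *   lctl set_param mdt.*.commit_on_sharing=1
 */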
8379 /**
8380 * Check COS (Commit On Sharing) status.
8381 *
8382 * Return COS flag status.
8383 *
8384 * \param mdt mdt device
8385 */
8386 int mdt_cos_is_enabled(struct mdt_device *mdt)
8387 {
8388 return mdt->mdt_opts.mo_cos != 0;
8389 }
8391 static const struct lu_device_type_operations mdt_device_type_ops = {
8392 .ldto_device_alloc = mdt_device_alloc,
8393 .ldto_device_free = mdt_device_free,
8394 .ldto_device_fini = mdt_device_fini
8395 };
8397 static struct lu_device_type mdt_device_type = {
8398 .ldt_tags = LU_DEVICE_MD,
8399 .ldt_name = LUSTRE_MDT_NAME,
8400 .ldt_ops = &mdt_device_type_ops,
8401 .ldt_ctx_tags = LCT_MD_THREAD
8402 };
8404 static int __init mdt_init(void)
8405 {
8406 int rc;
8408 BUILD_BUG_ON(sizeof("0x0123456789ABCDEF:0x01234567:0x01234567") !=
8409 FID_NOBRACE_LEN + 1);
8410 BUILD_BUG_ON(sizeof("[0x0123456789ABCDEF:0x01234567:0x01234567]") !=
8411 FID_LEN + 1);
8413 rc = libcfs_setup();
8414 if (rc)
8415 return rc;
8417 rc = lu_kmem_init(mdt_caches);
8418 if (rc)
8419 return rc;
8421 rc = mds_mod_init();
8422 if (rc)
8423 GOTO(lu_fini, rc);
8425 rc = class_register_type(&mdt_obd_device_ops, NULL, true,
8426 LUSTRE_MDT_NAME, &mdt_device_type);
8427 if (rc)
8428 GOTO(lu_fini, rc);
8429 return 0;
8430 lu_fini:
8431 lu_kmem_fini(mdt_caches);
8432 return rc;
8433 }
8438 static void __exit mdt_exit(void)
8439 {
8440 class_unregister_type(LUSTRE_MDT_NAME);
8441 mds_mod_exit();
8442 lu_kmem_fini(mdt_caches);
8443 }
8445 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
8446 MODULE_DESCRIPTION("Lustre Metadata Target ("LUSTRE_MDT_NAME")");
8447 MODULE_VERSION(LUSTRE_VERSION_STRING);
8448 MODULE_LICENSE("GPL");
8450 module_init(mdt_init);
8451 module_exit(mdt_exit);