4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/mdt/mdt_lib.c
33 * Lustre Metadata Target (mdt) request unpacking helper.
35 * Author: Peter Braam <braam@clusterfs.com>
36 * Author: Andreas Dilger <adilger@clusterfs.com>
37 * Author: Phil Schwan <phil@clusterfs.com>
38 * Author: Mike Shaver <shaver@clusterfs.com>
39 * Author: Nikita Danilov <nikita@clusterfs.com>
40 * Author: Huang Hua <huanghua@clusterfs.com>
41 * Author: Fan Yong <fanyong@clusterfs.com>
44 #define DEBUG_SUBSYSTEM S_MDS
46 #include <linux/user_namespace.h>
47 #include <linux/uidgid.h>
49 #include "mdt_internal.h"
50 #include <uapi/linux/lnet/nidstr.h>
51 #include <lustre_nodemap.h>
53 typedef enum ucred_init_type {
/* Reassemble the 64-bit create flags from the two 32-bit on-wire halves:
 * cr_flags_l carries bits 0-31 and cr_flags_h carries bits 32-63. */
59 static __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
61 return (__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32);
/* Tear down the per-request user credential attached to the thread context:
 * reset the supplementary GIDs, drop the group_info reference, release the
 * identity-cache reference, and mark the ucred as UCRED_INIT so a later
 * mdt_init_ucred() can repopulate it. */
64 void mdt_exit_ucred(struct mdt_thread_info *info)
66 struct lu_ucred *uc = mdt_ucred(info);
67 struct mdt_device *mdt = info->mti_mdt;
/* Only clean up if the ucred was actually populated since the last init. */
70 if (uc->uc_valid != UCRED_INIT) {
71 uc->uc_suppgids[0] = uc->uc_suppgids[1] = -1;
73 put_group_info(uc->uc_ginfo);
/* Return the identity ref taken from mdt_identity_cache, if any. */
76 if (uc->uc_identity) {
77 mdt_identity_put(mdt->mdt_identity_cache,
79 uc->uc_identity = NULL;
81 uc->uc_valid = UCRED_INIT;
/* Check whether @peernid appears in @nidlist (the nosquash NID list),
 * taking @rsi_lock around the list walk.  Returns the cfs_match_nid()
 * result: non-zero when the NID matches the list. */
85 static int match_nosquash_list(struct spinlock *rsi_lock,
86 struct list_head *nidlist,
87 struct lnet_nid *peernid)
92 rc = cfs_match_nid(peernid, nidlist);
93 spin_unlock(rsi_lock);
97 /* root_squash for inter-MDS operations */
/* Apply the root-squash policy to the current ucred: if the squash UID is
 * configured, the caller is root (fsuid 0), and the peer NID is not in the
 * nosquash list, remap fsuid/fsgid to the configured squash IDs, clear all
 * capabilities and drop the supplementary GIDs. */
98 static int mdt_root_squash(struct mdt_thread_info *info,
99 struct lnet_nid *peernid)
101 struct lu_ucred *ucred = mdt_ucred(info);
102 struct root_squash_info *squash = &info->mti_mdt->mdt_squash;
105 LASSERT(ucred != NULL);
/* Nothing to do when squashing is disabled (rsi_uid == 0) or the
 * requester is not root (uc_fsuid != 0). */
106 if (!squash->rsi_uid || ucred->uc_fsuid)
/* Peers on the nosquash_nids list keep their root credentials. */
109 if (match_nosquash_list(&squash->rsi_lock,
110 &squash->rsi_nosquash_nids,
112 CDEBUG(D_OTHER, "%s is in nosquash_nids list\n",
113 libcfs_nidstr(peernid));
117 CDEBUG(D_OTHER, "squash req from %s, (%d:%d/%x)=>(%d:%d/%x)\n",
118 libcfs_nidstr(peernid),
119 ucred->uc_fsuid, ucred->uc_fsgid, ucred->uc_cap.cap[0],
120 squash->rsi_uid, squash->rsi_gid, 0);
122 ucred->uc_fsuid = squash->rsi_uid;
123 ucred->uc_fsgid = squash->rsi_gid;
124 ucred->uc_cap = CAP_EMPTY_SET;
125 ucred->uc_suppgids[0] = -1;
126 ucred->uc_suppgids[1] = -1;
/* Copy the jobid string from the incoming request into the ucred;
 * clear it (empty string) when the request carries no jobid. */
131 static void ucred_set_jobid(struct mdt_thread_info *info, struct lu_ucred *uc)
133 struct ptlrpc_request *req = mdt_info_req(info);
134 const char *jobid = mdt_req_get_jobid(req);
136 /* set jobid if specified. */
138 strlcpy(uc->uc_jobid, jobid, sizeof(uc->uc_jobid));
140 uc->uc_jobid[0] = '\0';
/* Record the peer NID of the client connection in the ucred; falls back
 * to LNET_NID_ANY when no export/connection is available (e.g. a local
 * or internal invocation). */
143 static void ucred_set_nid(struct mdt_thread_info *info, struct lu_ucred *uc)
145 if (info && info->mti_exp && info->mti_exp->exp_connection)
146 uc->uc_nid = lnet_nid_to_nid4(
147 &info->mti_exp->exp_connection->c_peer.nid);
149 uc->uc_nid = LNET_NID_ANY;
/* Propagate the nodemap's audit flag (nmf_enable_audit) into the ucred.
 * The nodemap reference is looked up from the export and dropped again
 * before returning. */
152 static void ucred_set_audit_enabled(struct mdt_thread_info *info,
155 struct lu_nodemap *nodemap = NULL;
158 if (info && info->mti_exp) {
159 nodemap = nodemap_get_from_exp(info->mti_exp);
160 if (nodemap && !IS_ERR(nodemap)) {
161 audit = nodemap->nmf_enable_audit;
162 nodemap_putref(nodemap);
166 uc->uc_enable_audit = audit;
/* Expand the nodemap RBAC role mask into the individual per-role boolean
 * flags on the ucred.  Defaults to NODEMAP_RBAC_ALL (everything allowed)
 * when no nodemap can be obtained from the export. */
169 static void ucred_set_rbac_roles(struct mdt_thread_info *info,
172 struct lu_nodemap *nodemap = NULL;
173 enum nodemap_rbac_roles rbac = NODEMAP_RBAC_ALL;
175 if (info && info->mti_exp) {
176 nodemap = nodemap_get_from_exp(info->mti_exp);
177 if (!IS_ERR_OR_NULL(nodemap)) {
178 rbac = nodemap->nmf_rbac;
179 nodemap_putref(nodemap);
/* One boolean per role bit; !! normalises the mask test to 0/1. */
183 uc->uc_rbac_file_perms = !!(rbac & NODEMAP_RBAC_FILE_PERMS);
184 uc->uc_rbac_dne_ops = !!(rbac & NODEMAP_RBAC_DNE_OPS);
185 uc->uc_rbac_quota_ops = !!(rbac & NODEMAP_RBAC_QUOTA_OPS);
186 uc->uc_rbac_byfid_ops = !!(rbac & NODEMAP_RBAC_BYFID_OPS);
187 uc->uc_rbac_chlg_ops = !!(rbac & NODEMAP_RBAC_CHLG_OPS);
188 uc->uc_rbac_fscrypt_admin = !!(rbac & NODEMAP_RBAC_FSCRYPT_ADMIN);
/* Build the thread ucred from the GSS-authenticated user descriptor of the
 * request ("new" credential path, used when rq_auth_gss is set and the
 * request is not from another MDT).  Steps visible here:
 *   1. map all IDs through the nodemap (client->FS direction);
 *   2. enforce nodemap squash / deny_unknown policy;
 *   3. verify the authenticated uid matches what the client claims;
 *   4. fetch the identity-cache entry and derive setuid/setgid/setgrp
 *      permissions for this peer;
 *   5. install supplementary groups, capabilities, fsuid/fsgid;
 *   6. apply root squash and fill in jobid/NID/audit/RBAC fields.
 * On success the ucred is marked UCRED_NEW; on failure any group_info and
 * identity references taken are released again. */
191 static int new_init_ucred(struct mdt_thread_info *info, ucred_init_type_t type,
194 struct ptlrpc_request *req = mdt_info_req(info);
195 struct mdt_device *mdt = info->mti_mdt;
196 struct ptlrpc_user_desc *pud = req->rq_user_desc;
197 struct lu_ucred *ucred = mdt_ucred(info);
198 struct lu_nodemap *nodemap;
199 struct lnet_nid peernid = req->rq_peer.nid;
203 bool is_nm_gid_squashed = false;
/* This path is only valid for GSS-authenticated, non-MDT clients that
 * supplied a user descriptor. */
208 LASSERT(req->rq_auth_gss);
209 LASSERT(!req->rq_auth_usr_mdt);
210 LASSERT(req->rq_user_desc);
211 LASSERT(ucred != NULL);
213 ucred->uc_valid = UCRED_INVALID;
215 nodemap = nodemap_get_from_exp(info->mti_exp);
217 RETURN(PTR_ERR(nodemap));
/* Map every client-supplied ID into the filesystem ID space. */
219 pud->pud_uid = nodemap_map_id(nodemap, NODEMAP_UID,
220 NODEMAP_CLIENT_TO_FS, pud->pud_uid);
221 pud->pud_gid = nodemap_map_id(nodemap, NODEMAP_GID,
222 NODEMAP_CLIENT_TO_FS, pud->pud_gid);
223 pud->pud_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
224 NODEMAP_CLIENT_TO_FS, pud->pud_fsuid);
225 pud->pud_fsgid = nodemap_map_id(nodemap, NODEMAP_GID,
226 NODEMAP_CLIENT_TO_FS, pud->pud_fsgid);
/* Remember the original (already nodemap-mapped) IDs. */
228 ucred->uc_o_uid = pud->pud_uid;
229 ucred->uc_o_gid = pud->pud_gid;
230 ucred->uc_o_fsuid = pud->pud_fsuid;
231 ucred->uc_o_fsgid = pud->pud_fsgid;
233 if (nodemap && ucred->uc_o_uid == nodemap->nm_squash_uid) {
234 /* deny access before we get identity ref */
235 if (nodemap->nmf_deny_unknown) {
236 nodemap_putref(nodemap);
240 ucred->uc_suppgids[0] = -1;
241 ucred->uc_suppgids[1] = -1;
244 if (nodemap && ucred->uc_o_gid == nodemap->nm_squash_gid)
245 is_nm_gid_squashed = true;
247 nodemap_putref(nodemap);
/* BODY_INIT requests carry one supplementary GID in the mdt_body. */
249 if (type == BODY_INIT) {
250 struct mdt_body *body = (struct mdt_body *)buf;
252 ucred->uc_suppgids[0] = body->mbo_suppgid;
253 ucred->uc_suppgids[1] = -1;
/* With strong (non root-only) auth flavors, the claimed uid must match
 * the uid established by authentication. */
256 if (!flvr_is_rootonly(req->rq_flvr.sf_rpc) &&
257 req->rq_auth_uid != pud->pud_uid) {
258 CDEBUG(D_SEC, "local client %s: auth uid %u "
259 "while client claims %u:%u/%u:%u\n",
260 libcfs_nidstr(&peernid), req->rq_auth_uid,
261 pud->pud_uid, pud->pud_gid,
262 pud->pud_fsuid, pud->pud_fsgid);
/* With identity upcall disabled, grant all setid/setgrp permissions. */
266 if (is_identity_get_disabled(mdt->mdt_identity_cache)) {
267 ucred->uc_identity = NULL;
268 perm = CFS_SETUID_PERM | CFS_SETGID_PERM | CFS_SETGRP_PERM;
270 struct md_identity *identity;
272 identity = mdt_identity_get(mdt->mdt_identity_cache,
274 if (IS_ERR(identity)) {
/* -EREMCHG: identity changed mid-flight; fall back to full perms
 * rather than failing the request. */
275 if (unlikely(PTR_ERR(identity) == -EREMCHG)) {
276 ucred->uc_identity = NULL;
277 perm = CFS_SETUID_PERM | CFS_SETGID_PERM |
281 "Deny access without identity: uid %u\n",
286 ucred->uc_identity = identity;
287 perm = mdt_identity_get_perm(ucred->uc_identity,
292 /* find out the setuid/setgid attempt */
293 setuid = (pud->pud_uid != pud->pud_fsuid);
294 setgid = ((pud->pud_gid != pud->pud_fsgid) ||
295 (ucred->uc_identity &&
296 (pud->pud_gid != ucred->uc_identity->mi_gid)));
298 /* check permission of setuid */
299 if (setuid && !(perm & CFS_SETUID_PERM)) {
300 CDEBUG(D_SEC, "mdt blocked setuid attempt (%u -> %u) from %s\n",
301 pud->pud_uid, pud->pud_fsuid, libcfs_nidstr(&peernid));
302 GOTO(out, rc = -EACCES);
305 /* check permission of setgid */
306 if (setgid && !(perm & CFS_SETGID_PERM)) {
307 CDEBUG(D_SEC, "mdt blocked setgid attempt (%u:%u/%u:%u -> %u) "
308 "from %s\n", pud->pud_uid, pud->pud_gid,
309 pud->pud_fsuid, pud->pud_fsgid,
310 ucred->uc_identity->mi_gid, libcfs_nidstr(&peernid));
311 GOTO(out, rc = -EACCES);
314 if (perm & CFS_SETGRP_PERM) {
315 /* only set groups if GID is not squashed */
316 if (pud->pud_ngroups && !is_nm_gid_squashed) {
317 /* setgroups for local client */
318 ucred->uc_ginfo = groups_alloc(pud->pud_ngroups);
319 if (!ucred->uc_ginfo) {
320 CERROR("failed to alloc %d groups\n",
322 GOTO(out, rc = -ENOMEM);
325 lustre_groups_from_list(ucred->uc_ginfo,
327 lustre_groups_sort(ucred->uc_ginfo);
329 ucred->uc_suppgids[0] = -1;
330 ucred->uc_suppgids[1] = -1;
331 ucred->uc_ginfo = NULL;
334 ucred->uc_suppgids[0] = -1;
335 ucred->uc_suppgids[1] = -1;
336 ucred->uc_ginfo = NULL;
339 ucred->uc_uid = pud->pud_uid;
340 ucred->uc_gid = pud->pud_gid;
/* Capabilities are only honoured for non-squashed users. */
342 ucred->uc_cap = CAP_EMPTY_SET;
343 if (!nodemap || ucred->uc_o_uid != nodemap->nm_squash_uid)
344 ucred->uc_cap.cap[0] = pud->pud_cap;
346 ucred->uc_fsuid = pud->pud_fsuid;
347 ucred->uc_fsgid = pud->pud_fsgid;
349 /* process root_squash here. */
350 mdt_root_squash(info, &peernid);
352 ucred->uc_valid = UCRED_NEW;
353 ucred_set_jobid(info, ucred);
354 ucred_set_nid(info, ucred);
355 ucred_set_audit_enabled(info, ucred);
356 ucred_set_rbac_roles(info, ucred);
/* Error path: undo the references taken above. */
362 if (ucred->uc_ginfo) {
363 put_group_info(ucred->uc_ginfo);
364 ucred->uc_ginfo = NULL;
366 if (ucred->uc_identity) {
367 mdt_identity_put(mdt->mdt_identity_cache,
369 ucred->uc_identity = NULL;
377 * Check whether allow the client to set supplementary group IDs or not.
379 * \param[in] info pointer to the thread context
380 * \param[in] uc pointer to the RPC user descriptor
382 * \retval true if allow to set supplementary group IDs
383 * \retval false for other cases
385 bool allow_client_chgrp(struct mdt_thread_info *info, struct lu_ucred *uc)
389 /* 1. If identity_upcall is disabled,
390 * permit local client to do anything. */
391 if (is_identity_get_disabled(info->mti_mdt->mdt_identity_cache))
394 /* 2. If fail to get related identities, then forbid any client to
395 * set supplementary group IDs. */
396 if (uc->uc_identity == NULL)
399 /* 3. Check the permission in the identities. */
/* Permission is evaluated per peer NID against the cached identity. */
400 perm = mdt_identity_get_perm(
402 &mdt_info_req(info)->rq_peer.nid);
403 if (perm & CFS_SETGRP_PERM)
/* Validate the request's user descriptor without installing a ucred:
 * verifies the authenticated uid matches the claimed uid and that any
 * setuid/setgid attempt is permitted by the identity cache.  A ucred
 * already in UCRED_OLD/UCRED_NEW state is accepted as-is, as are
 * requests that cannot carry a user descriptor (no GSS, MDT peer). */
409 int mdt_check_ucred(struct mdt_thread_info *info)
411 struct ptlrpc_request *req = mdt_info_req(info);
412 struct mdt_device *mdt = info->mti_mdt;
413 struct ptlrpc_user_desc *pud = req->rq_user_desc;
414 struct lu_ucred *ucred = mdt_ucred(info);
415 struct md_identity *identity = NULL;
416 struct lnet_nid peernid = req->rq_peer.nid;
424 LASSERT(ucred != NULL);
425 if ((ucred->uc_valid == UCRED_OLD) || (ucred->uc_valid == UCRED_NEW))
428 if (!req->rq_auth_gss || req->rq_auth_usr_mdt || !req->rq_user_desc)
431 /* sanity check: if we use strong authentication, we expect the
432 * uid which client claimed is true */
433 if (!flvr_is_rootonly(req->rq_flvr.sf_rpc) &&
434 req->rq_auth_uid != pud->pud_uid) {
435 CDEBUG(D_SEC, "local client %s: auth uid %u "
436 "while client claims %u:%u/%u:%u\n",
437 libcfs_nidstr(&peernid), req->rq_auth_uid,
438 pud->pud_uid, pud->pud_gid,
439 pud->pud_fsuid, pud->pud_fsgid);
/* No identity upcall configured: nothing further to check. */
443 if (is_identity_get_disabled(mdt->mdt_identity_cache))
446 identity = mdt_identity_get(mdt->mdt_identity_cache, pud->pud_uid);
447 if (IS_ERR(identity)) {
/* -EREMCHG (identity changed) is tolerated; other errors deny. */
448 if (unlikely(PTR_ERR(identity) == -EREMCHG)) {
451 CDEBUG(D_SEC, "Deny access without identity: uid %u\n",
457 perm = mdt_identity_get_perm(identity, &peernid);
458 /* find out the setuid/setgid attempt */
459 setuid = (pud->pud_uid != pud->pud_fsuid);
460 setgid = (pud->pud_gid != pud->pud_fsgid ||
461 pud->pud_gid != identity->mi_gid);
463 /* check permission of setuid */
464 if (setuid && !(perm & CFS_SETUID_PERM)) {
465 CDEBUG(D_SEC, "mdt blocked setuid attempt (%u -> %u) from %s\n",
466 pud->pud_uid, pud->pud_fsuid, libcfs_nidstr(&peernid));
467 GOTO(out, rc = -EACCES);
470 /* check permission of setgid */
471 if (setgid && !(perm & CFS_SETGID_PERM)) {
473 "mdt blocked setgid attempt (%u:%u/%u:%u -> %u) from %s\n",
474 pud->pud_uid, pud->pud_gid,
475 pud->pud_fsuid, pud->pud_fsgid, identity->mi_gid,
476 libcfs_nidstr(&peernid));
477 GOTO(out, rc = -EACCES);
/* Drop the identity reference taken above on all exit paths. */
483 mdt_identity_put(mdt->mdt_identity_cache, identity);
/* Shared tail of the "old" (non-GSS) credential initialisation paths:
 * applies nodemap squash/deny policy, looks up the identity cache entry,
 * applies root squash, and fills in jobid/NID/audit/RBAC before marking
 * the ucred UCRED_OLD.  Caller passes the nodemap it already holds. */
487 static int old_init_ucred_common(struct mdt_thread_info *info,
488 struct lu_nodemap *nodemap)
490 struct lu_ucred *uc = mdt_ucred(info);
491 struct mdt_device *mdt = info->mti_mdt;
492 struct md_identity *identity = NULL;
494 if (nodemap && uc->uc_o_uid == nodemap->nm_squash_uid) {
495 /* deny access before we get identity ref */
496 if (nodemap->nmf_deny_unknown)
/* Squashed users lose all capabilities and supplementary GIDs. */
499 uc->uc_cap = CAP_EMPTY_SET;
500 uc->uc_suppgids[0] = -1;
501 uc->uc_suppgids[1] = -1;
504 if (!is_identity_get_disabled(mdt->mdt_identity_cache)) {
505 identity = mdt_identity_get(mdt->mdt_identity_cache,
507 if (IS_ERR(identity)) {
/* Tolerate -EREMCHG, or a caller holding CAP_DAC_READ_SEARCH;
 * otherwise deny access for an unresolvable identity. */
508 if (unlikely(PTR_ERR(identity) == -EREMCHG ||
509 cap_raised(uc->uc_cap,
510 CAP_DAC_READ_SEARCH))) {
513 CDEBUG(D_SEC, "Deny access without identity: "
514 "uid %u\n", uc->uc_fsuid);
519 uc->uc_identity = identity;
521 /* process root_squash here. */
522 mdt_root_squash(info,
523 &mdt_info_req(info)->rq_peer.nid);
525 uc->uc_valid = UCRED_OLD;
526 ucred_set_jobid(info, uc);
527 ucred_set_nid(info, uc);
528 ucred_set_audit_enabled(info, uc);
529 ucred_set_rbac_roles(info, uc);
/* Initialise the ucred from the IDs carried in the request's mdt_body
 * ("old" path, used when there is no GSS user descriptor).  All IDs are
 * first mapped through the nodemap, then old_init_ucred_common() applies
 * squash/identity policy. */
536 static int old_init_ucred(struct mdt_thread_info *info,
537 struct mdt_body *body)
539 struct lu_ucred *uc = mdt_ucred(info);
540 struct lu_nodemap *nodemap;
544 nodemap = nodemap_get_from_exp(info->mti_exp);
546 RETURN(PTR_ERR(nodemap));
/* Map client IDs into the filesystem ID space in place. */
548 body->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
549 NODEMAP_CLIENT_TO_FS, body->mbo_uid);
550 body->mbo_gid = nodemap_map_id(nodemap, NODEMAP_GID,
551 NODEMAP_CLIENT_TO_FS, body->mbo_gid);
552 body->mbo_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
553 NODEMAP_CLIENT_TO_FS, body->mbo_fsuid);
554 body->mbo_fsgid = nodemap_map_id(nodemap, NODEMAP_GID,
555 NODEMAP_CLIENT_TO_FS, body->mbo_fsgid);
558 uc->uc_valid = UCRED_INVALID;
559 uc->uc_o_uid = uc->uc_uid = body->mbo_uid;
560 uc->uc_o_gid = uc->uc_gid = body->mbo_gid;
561 uc->uc_o_fsuid = uc->uc_fsuid = body->mbo_fsuid;
562 uc->uc_o_fsgid = uc->uc_fsgid = body->mbo_fsgid;
563 uc->uc_suppgids[0] = body->mbo_suppgid;
564 uc->uc_suppgids[1] = -1;
566 uc->uc_cap = CAP_EMPTY_SET;
567 uc->uc_cap.cap[0] = body->mbo_capability;
569 rc = old_init_ucred_common(info, nodemap);
570 nodemap_putref(nodemap);
/* Initialise the ucred for a reint request on the "old" (non-GSS) path.
 * The fsuid/fsgid are expected to have been pre-seeded into the ucred by
 * the per-opcode unpack helper (e.g. mdt_setattr_unpack_rec()); here they
 * are nodemap-mapped and the common policy tail is applied. */
575 static int old_init_ucred_reint(struct mdt_thread_info *info)
577 struct lu_ucred *uc = mdt_ucred(info);
578 struct lu_nodemap *nodemap;
582 nodemap = nodemap_get_from_exp(info->mti_exp);
584 RETURN(PTR_ERR(nodemap));
588 uc->uc_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
589 NODEMAP_CLIENT_TO_FS, uc->uc_fsuid);
590 uc->uc_fsgid = nodemap_map_id(nodemap, NODEMAP_GID,
591 NODEMAP_CLIENT_TO_FS, uc->uc_fsgid);
593 uc->uc_valid = UCRED_INVALID;
/* Reint requests carry only fs IDs; use them for the real IDs too. */
594 uc->uc_o_uid = uc->uc_o_fsuid = uc->uc_uid = uc->uc_fsuid;
595 uc->uc_o_gid = uc->uc_o_fsgid = uc->uc_gid = uc->uc_fsgid;
598 rc = old_init_ucred_common(info, nodemap);
599 nodemap_putref(nodemap);
/* Dispatch credential initialisation: reuse a still-valid ucred, otherwise
 * tear it down and rebuild via the old (no GSS user descriptor) or new
 * (GSS user descriptor) path. */
604 static inline int __mdt_init_ucred(struct mdt_thread_info *info,
605 struct mdt_body *body)
607 struct ptlrpc_request *req = mdt_info_req(info);
608 struct lu_ucred *uc = mdt_ucred(info);
611 if ((uc->uc_valid == UCRED_OLD) || (uc->uc_valid == UCRED_NEW))
614 mdt_exit_ucred(info);
616 if (!req->rq_auth_gss || req->rq_auth_usr_mdt || !req->rq_user_desc)
617 return old_init_ucred(info, body);
619 return new_init_ucred(info, BODY_INIT, body);
/* Public wrapper: initialise the request ucred from an mdt_body. */
622 int mdt_init_ucred(struct mdt_thread_info *info, struct mdt_body *body)
624 return __mdt_init_ucred(info, body);
/* Initialise the ucred for a reint (modifying) request.  For a plain
 * MDS_CLOSE (no HSM-release / layout-swap intent) permission checks are
 * skipped by raising full capability sets (LU-5564).  Otherwise the ucred
 * is rebuilt through the old or new path, as in __mdt_init_ucred(). */
627 int mdt_init_ucred_reint(struct mdt_thread_info *info)
629 struct ptlrpc_request *req = mdt_info_req(info);
630 struct lu_ucred *uc = mdt_ucred(info);
631 struct md_attr *ma = &info->mti_attr;
634 if ((uc->uc_valid == UCRED_OLD) || (uc->uc_valid == UCRED_NEW))
637 /* LU-5564: for normal close request, skip permission check */
638 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE &&
639 !(ma->ma_attr_flags & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP))) {
640 cap_raise_nfsd_set(uc->uc_cap, CAP_FULL_SET);
641 cap_raise_fs_set(uc->uc_cap, CAP_FULL_SET);
644 mdt_exit_ucred(info);
646 if (!req->rq_auth_gss || req->rq_auth_usr_mdt || !req->rq_user_desc)
647 return old_init_ucred_reint(info);
649 return new_init_ucred(info, REC_INIT, NULL);
652 /* copied from lov/lov_ea.c, just for debugging, will be removed later */
/* Dump a LOV EA (v1/v3 layouts only) to the debug log: object id, magic,
 * pattern, stripe geometry and, unless the object is a directory or a
 * released file, each stripe's target object id. */
653 void mdt_dump_lmm(int level, const struct lov_mds_md *lmm, __u64 valid)
655 const struct lov_ost_data_v1 *lod;
656 __u32 lmm_magic = le32_to_cpu(lmm->lmm_magic);
/* Skip all work unless this debug level is actually enabled. */
660 if (likely(!cfs_cdebug_show(level, DEBUG_SUBSYSTEM)))
663 CDEBUG(level, "objid "DOSTID", magic 0x%08X, pattern %#X\n",
664 POSTID(&lmm->lmm_oi), lmm_magic,
665 le32_to_cpu(lmm->lmm_pattern));
667 /* No support for compount layouts yet */
668 if (lmm_magic != LOV_MAGIC_V1 && lmm_magic != LOV_MAGIC_V3)
671 count = le16_to_cpu(((struct lov_user_md *)lmm)->lmm_stripe_count);
672 CDEBUG(level, "stripe_size=0x%x, stripe_count=0x%x\n",
673 le32_to_cpu(lmm->lmm_stripe_size), count);
675 /* If it's a directory or a released file, then there are
676 * no actual objects to print, so bail out. */
677 if (valid & OBD_MD_FLDIREA ||
678 le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
681 LASSERT(count <= LOV_MAX_STRIPE_COUNT);
682 for (i = 0, lod = lmm->lmm_objects; i < count; i++, lod++) {
685 ostid_le_to_cpu(&lod->l_ost_oi, &oi);
686 CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n",
687 i, le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
/* Dump an LMV EA to the debug log.  Handles the foreign-LMV format as a
 * special case, then prints the v1 header and (for non-stripe-magic,
 * sanely-sized layouts) each stripe FID. */
691 void mdt_dump_lmv(unsigned int level, const union lmv_mds_md *lmv)
693 const struct lmv_mds_md_v1 *lmm1;
694 const struct lmv_foreign_md *lfm;
697 if (likely(!cfs_cdebug_show(level, DEBUG_SUBSYSTEM)))
700 /* foreign LMV case */
701 lfm = &lmv->lmv_foreign_md;
702 if (le32_to_cpu(lfm->lfm_magic) == LMV_MAGIC_FOREIGN) {
704 "foreign magic 0x%08X, length %u, type %u, flags %u, value '%.*s'\n",
705 le32_to_cpu(lfm->lfm_magic),
706 le32_to_cpu(lfm->lfm_length),
707 le32_to_cpu(lfm->lfm_type),
708 le32_to_cpu(lfm->lfm_flags),
709 le32_to_cpu(lfm->lfm_length), lfm->lfm_value);
713 lmm1 = &lmv->lmv_md_v1;
715 "magic 0x%08X, master %#X stripe_count %d hash_type %#x\n",
716 le32_to_cpu(lmm1->lmv_magic),
717 le32_to_cpu(lmm1->lmv_master_mdt_index),
718 le32_to_cpu(lmm1->lmv_stripe_count),
719 le32_to_cpu(lmm1->lmv_hash_type));
/* Stripe-magic entries and oversized stripe counts carry no printable
 * per-stripe FID list here. */
721 if (le32_to_cpu(lmm1->lmv_magic) == LMV_MAGIC_STRIPE)
724 if (le32_to_cpu(lmm1->lmv_stripe_count) > LMV_MAX_STRIPE_COUNT)
727 for (i = 0; i < le32_to_cpu(lmm1->lmv_stripe_count); i++) {
730 fid_le_to_cpu(&fid, &lmm1->lmv_stripe_fids[i]);
731 CDEBUG(level, "idx %u subobj "DFID"\n", i, PFID(&fid));
735 /* Shrink and/or grow reply buffers */
/* Adjust reply-capsule buffer sizes to what was actually packed:
 *   1. shrink MDT_MD, ACL/LOGCOOKIES and the optional SECCTX/ENCCTX/
 *      default-LMV buffers when unused or oversized;
 *   2. if the oversized "big lmm"/"big acl" staging buffers were used,
 *      grow the corresponding reply fields and copy the data in.
 * On grow failure the related valid bits are cleared so the client does
 * not interpret stale EA/ACL data, and (for MD) the transno is dropped
 * so the request is treated as failed. */
736 int mdt_fix_reply(struct mdt_thread_info *info)
738 struct req_capsule *pill = info->mti_pill;
739 struct mdt_body *body;
740 int md_size, md_packed = 0;
745 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
746 LASSERT(body != NULL);
748 if (body->mbo_valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE |
750 md_size = body->mbo_eadatasize;
754 acl_size = body->mbo_aclsize;
756 /* this replay - not send info to client */
757 if (info->mti_spec.no_create) {
762 CDEBUG(D_INFO, "Shrink to md_size = %d cookie/acl_size = %d\n",
767 &RMF_ACL, or &RMF_LOGCOOKIES
768 (optional) &RMF_CAPA1,
769 (optional) &RMF_CAPA2,
770 (optional) something else
773 /* MDT_MD buffer may be bigger than packed value, let's shrink all
774 * buffers before growing it */
775 if (info->mti_big_lmm_used) {
776 /* big_lmm buffer may be used even without packing the result
777 * into reply, just for internal server needs */
778 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
779 md_packed = req_capsule_get_size(pill, &RMF_MDT_MD,
782 /* free big lmm if md_size is not needed */
783 if (md_size == 0 || md_packed == 0) {
784 info->mti_big_lmm_used = 0;
786 /* buffer must be allocated separately */
787 LASSERT(info->mti_attr.ma_lmm !=
788 req_capsule_server_get(pill, &RMF_MDT_MD));
789 req_capsule_shrink(pill, &RMF_MDT_MD, 0, RCL_SERVER);
791 } else if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) {
792 req_capsule_shrink(pill, &RMF_MDT_MD, md_size, RCL_SERVER);
795 if (info->mti_big_acl_used) {
797 info->mti_big_acl_used = 0;
799 req_capsule_shrink(pill, &RMF_ACL, 0, RCL_SERVER);
800 } else if (req_capsule_has_field(pill, &RMF_ACL, RCL_SERVER)) {
801 req_capsule_shrink(pill, &RMF_ACL, acl_size, RCL_SERVER);
802 } else if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) {
803 req_capsule_shrink(pill, &RMF_LOGCOOKIES, acl_size, RCL_SERVER);
806 /* Shrink optional SECCTX buffer if it is not used */
807 if (req_capsule_has_field(pill, &RMF_FILE_SECCTX, RCL_SERVER) &&
808 req_capsule_get_size(pill, &RMF_FILE_SECCTX, RCL_SERVER) != 0 &&
809 !(body->mbo_valid & OBD_MD_SECCTX))
810 req_capsule_shrink(pill, &RMF_FILE_SECCTX, 0, RCL_SERVER);
812 /* Shrink optional ENCCTX buffer if it is not used */
813 if (req_capsule_has_field(pill, &RMF_FILE_ENCCTX, RCL_SERVER) &&
814 req_capsule_get_size(pill, &RMF_FILE_ENCCTX, RCL_SERVER) != 0 &&
815 !(body->mbo_valid & OBD_MD_ENCCTX))
816 req_capsule_shrink(pill, &RMF_FILE_ENCCTX, 0, RCL_SERVER);
818 /* Shrink optional default LMV buffer if it is not used */
819 if (req_capsule_has_field(pill, &RMF_DEFAULT_MDT_MD, RCL_SERVER) &&
820 req_capsule_get_size(pill, &RMF_DEFAULT_MDT_MD, RCL_SERVER) != 0 &&
821 !(body->mbo_valid & OBD_MD_DEFAULT_MEA))
822 req_capsule_shrink(pill, &RMF_DEFAULT_MDT_MD, 0, RCL_SERVER);
825 * Some more field should be shrinked if needed.
826 * This should be done by those who added fields to reply message.
829 /* Grow MD buffer if needed finally */
830 if (info->mti_big_lmm_used) {
833 LASSERT(md_size > md_packed);
834 CDEBUG(D_INFO, "Enlarge reply buffer, need extra %d bytes\n",
835 md_size - md_packed);
837 /* FIXME: Grow reply buffer for the batch request. */
838 if (info->mti_batch_env) {
839 body->mbo_valid &= ~(OBD_MD_FLDIREA | OBD_MD_FLEASIZE);
840 info->mti_big_lmm_used = 0;
844 rc = req_capsule_server_grow(pill, &RMF_MDT_MD, md_size);
846 /* we can't answer with proper LOV EA, drop flags,
847 * the rc is also returned so this request is
848 * considered as failed */
849 body->mbo_valid &= ~(OBD_MD_FLDIREA | OBD_MD_FLEASIZE);
850 /* don't return transno along with error */
851 lustre_msg_set_transno(pill->rc_req->rq_repmsg, 0);
853 /* now we need to pack right LOV/LMV EA */
854 lmm = req_capsule_server_get(pill, &RMF_MDT_MD);
855 if (info->mti_attr.ma_valid & MA_LOV) {
856 LASSERT(req_capsule_get_size(pill, &RMF_MDT_MD,
858 info->mti_attr.ma_lmm_size);
859 memcpy(lmm, info->mti_attr.ma_lmm,
860 info->mti_attr.ma_lmm_size);
861 } else if (info->mti_attr.ma_valid & MA_LMV) {
862 LASSERT(req_capsule_get_size(pill, &RMF_MDT_MD,
864 info->mti_attr.ma_lmv_size);
865 memcpy(lmm, info->mti_attr.ma_lmv,
866 info->mti_attr.ma_lmv_size);
870 /* update mdt_max_mdsize so clients will be aware about that */
871 if (info->mti_mdt->mdt_max_mdsize < info->mti_attr.ma_lmm_size)
872 info->mti_mdt->mdt_max_mdsize =
873 info->mti_attr.ma_lmm_size;
874 info->mti_big_lmm_used = 0;
878 if (info->mti_big_acl_used) {
879 CDEBUG(D_INFO, "Enlarge reply ACL buffer to %d bytes\n",
882 if (info->mti_batch_env) {
883 body->mbo_valid &= ~OBD_MD_FLACL;
884 info->mti_big_acl_used = 0;
888 rc = req_capsule_server_grow(pill, &RMF_ACL, acl_size);
890 body->mbo_valid &= ~OBD_MD_FLACL;
892 void *acl = req_capsule_server_get(pill, &RMF_ACL);
894 memcpy(acl, info->mti_big_acl, acl_size);
897 info->mti_big_acl_used = 0;
904 /* if object is dying, pack the lov/llog data,
905 * parameter info->mti_attr should be valid at this point!
906 * Also implements RAoLU policy */
/* Called on the last unlink of an object: pack attributes into the reply
 * body (when in a request context) and, if remove-archive-on-last-unlink
 * (RAoLU) is enabled, the file is fully unlinked, closed and archived,
 * queue an HSM REMOVE request for the copytool. */
907 int mdt_handle_last_unlink(struct mdt_thread_info *info, struct mdt_object *mo,
910 struct mdt_body *repbody = NULL;
911 const struct lu_attr *la = &ma->ma_attr;
912 struct coordinator *cdt = &info->mti_mdt->mdt_coordinator;
/* Template for the HSM remove action; hai_fid is filled in below. */
915 struct hsm_action_item hai = {
916 .hai_len = sizeof(hai),
917 .hai_action = HSMA_REMOVE,
918 .hai_extent.length = -1,
926 if (mdt_info_req(info) != NULL) {
927 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
928 LASSERT(repbody != NULL);
930 CDEBUG(D_INFO, "not running in a request/reply context\n");
933 if ((ma->ma_valid & MA_INODE) && repbody != NULL)
934 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(mo));
936 if (ma->ma_valid & MA_LOV) {
937 CERROR("No need in LOV EA upon unlink\n");
941 repbody->mbo_eadatasize = 0;
943 /* Only check unlinked and archived if RAoLU and upon last close */
944 if (!cdt->cdt_remove_archive_on_last_unlink ||
945 atomic_read(&mo->mot_open_count) != 0)
948 /* mdt_attr_get_complex will clear ma_valid, so check here first */
949 if ((ma->ma_valid & MA_INODE) && (ma->ma_attr.la_nlink != 0))
952 if ((ma->ma_valid & MA_HSM) && (!(ma->ma_hsm.mh_flags & HS_EXISTS)))
955 need |= (MA_INODE | MA_HSM) & ~ma->ma_valid;
957 /* ma->ma_valid is missing either MA_INODE, MA_HSM, or both,
958 * try setting them */
960 rc = mdt_attr_get_complex(info, mo, ma);
962 CERROR("%s: unable to fetch missing attributes of"
963 DFID": rc=%d\n", mdt_obd_name(info->mti_mdt),
964 PFID(mdt_object_fid(mo)), rc);
/* Re-check nlink/HSM state with the freshly fetched attributes. */
968 if (need & MA_INODE) {
969 if (ma->ma_valid & MA_INODE) {
970 if (ma->ma_attr.la_nlink != 0)
978 if (ma->ma_valid & MA_HSM) {
979 if (!(ma->ma_hsm.mh_flags & HS_EXISTS))
987 /* RAoLU policy is active, last close on file has occured,
988 * file is unlinked, file is archived, so create remove request
990 * If CDT is not running, requests will be logged for later. */
991 if (ma->ma_hsm.mh_arch_id != 0)
992 archive_id = ma->ma_hsm.mh_arch_id;
994 archive_id = cdt->cdt_default_archive_id;
996 hai.hai_fid = *mdt_object_fid(mo);
998 rc = mdt_agent_record_add(info->mti_env, info->mti_mdt, archive_id, 0,
1001 CERROR("%s: unable to add HSM remove request for "DFID
1002 ": rc=%d\n", mdt_obd_name(info->mti_mdt),
1003 PFID(mdt_object_fid(mo)), rc);
/* Translate the wire MDS_ATTR_* valid mask into the internal LA_* mask,
 * while routing the flag-like bits into rr->rr_flags (open-truncate) and
 * ma->ma_attr_flags (owner-override, permission-bypass).  Any bits left
 * over after masking out all known attributes are reported via CDEBUG. */
1008 static __u64 mdt_attr_valid_xlate(enum mds_attr_flags in,
1009 struct mdt_reint_record *rr,
1015 if (in & MDS_ATTR_MODE)
1017 if (in & MDS_ATTR_UID)
1019 if (in & MDS_ATTR_GID)
1021 if (in & MDS_ATTR_SIZE)
1023 if (in & MDS_ATTR_BLOCKS)
1025 if (in & MDS_ATTR_ATIME_SET)
1027 if (in & MDS_ATTR_CTIME_SET)
1029 if (in & MDS_ATTR_MTIME_SET)
1031 if (in & MDS_ATTR_ATTR_FLAG)
1033 if (in & MDS_ATTR_KILL_SUID)
1034 out |= LA_KILL_SUID;
1035 if (in & MDS_ATTR_KILL_SGID)
1036 out |= LA_KILL_SGID;
1037 if (in & MDS_ATTR_PROJID)
1039 if (in & MDS_ATTR_LSIZE)
1041 if (in & MDS_ATTR_LBLOCKS)
/* These bits are not LA_* attributes; they modify request records. */
1044 if (in & MDS_ATTR_FROM_OPEN)
1045 rr->rr_flags |= MRF_OPEN_TRUNC;
1046 if (in & MDS_ATTR_OVERRIDE)
1047 ma->ma_attr_flags |= MDS_OWNEROVERRIDE;
1048 if (in & MDS_ATTR_FORCE)
1049 ma->ma_attr_flags |= MDS_PERM_BYPASS;
/* Strip all handled bits; anything remaining is unknown to this server. */
1051 in &= ~(MDS_ATTR_MODE | MDS_ATTR_UID | MDS_ATTR_GID | MDS_ATTR_PROJID |
1052 MDS_ATTR_ATIME | MDS_ATTR_MTIME | MDS_ATTR_CTIME |
1053 MDS_ATTR_ATIME_SET | MDS_ATTR_CTIME_SET | MDS_ATTR_MTIME_SET |
1054 MDS_ATTR_SIZE | MDS_ATTR_BLOCKS | MDS_ATTR_ATTR_FLAG |
1055 MDS_ATTR_FORCE | MDS_ATTR_KILL_SUID | MDS_ATTR_KILL_SGID |
1056 MDS_ATTR_FROM_OPEN | MDS_ATTR_LSIZE | MDS_ATTR_LBLOCKS |
1059 CDEBUG(D_INFO, "Unknown attr bits: %#llx\n", (u64)in);
/* Extract a name from a request field into @ln.  The wire buffer is
 * expected to include a NUL terminator, hence the "- 1" on the length.
 * With MNF_FIX_ANON, the 1-byte name "/" sent by newer kernels for NFS
 * disconnected dentries is treated specially. */
1066 int mdt_name_unpack(struct req_capsule *pill,
1067 const struct req_msg_field *field,
1069 enum mdt_name_flags flags)
1071 ln->ln_name = req_capsule_client_get(pill, field);
1072 ln->ln_namelen = req_capsule_get_size(pill, field, RCL_CLIENT) - 1;
1074 if (!lu_name_is_valid(ln)) {
1081 if ((flags & MNF_FIX_ANON) &&
1082 ln->ln_namelen == 1 && ln->ln_name[0] == '/') {
1083 /* Newer (3.x) kernels use a name of "/" for the
1084 * "anonymous" disconnected dentries from NFS
1085 * filehandle conversion. See d_obtain_alias(). */
/* Unpack the optional file security-context xattr (name + value) from the
 * request.  Outputs are NULLed first; they remain NULL when the client
 * did not send a secctx.  The name is validated to be NUL-terminated and
 * at most XATTR_NAME_MAX characters. */
1093 static int mdt_file_secctx_unpack(struct req_capsule *pill,
1094 const char **secctx_name,
1095 void **secctx, size_t *secctx_size)
1100 *secctx_name = NULL;
1104 if (!req_capsule_has_field(pill, &RMF_FILE_SECCTX_NAME, RCL_CLIENT) ||
1105 !req_capsule_field_present(pill, &RMF_FILE_SECCTX_NAME, RCL_CLIENT))
1108 name_size = req_capsule_get_size(pill, &RMF_FILE_SECCTX_NAME,
1113 if (name_size > XATTR_NAME_MAX + 1)
/* strnlen check ensures the buffer holds exactly one NUL-terminated
 * string of name_size - 1 characters. */
1116 name = req_capsule_client_get(pill, &RMF_FILE_SECCTX_NAME);
1117 if (strnlen(name, name_size) != name_size - 1)
1120 if (!req_capsule_has_field(pill, &RMF_FILE_SECCTX, RCL_CLIENT) ||
1121 !req_capsule_field_present(pill, &RMF_FILE_SECCTX, RCL_CLIENT))
1124 *secctx_name = name;
1125 *secctx = req_capsule_client_get(pill, &RMF_FILE_SECCTX);
1126 *secctx_size = req_capsule_get_size(pill, &RMF_FILE_SECCTX, RCL_CLIENT);
/* Unpack the optional fscrypt encryption context from the request.  A
 * no-op unless the export negotiated encryption support and the client
 * actually sent a non-empty ENCCTX field. */
1131 static int mdt_file_encctx_unpack(struct req_capsule *pill,
1132 void **encctx, size_t *encctx_size)
1137 if (!exp_connect_encrypt(pill->rc_req->rq_export))
1140 if (!req_capsule_has_field(pill, &RMF_FILE_ENCCTX, RCL_CLIENT) ||
1141 !req_capsule_field_present(pill, &RMF_FILE_ENCCTX, RCL_CLIENT))
1144 *encctx_size = req_capsule_get_size(pill, &RMF_FILE_ENCCTX, RCL_CLIENT);
1145 if (*encctx_size == 0)
1148 *encctx = req_capsule_client_get(pill, &RMF_FILE_ENCCTX);
/* Unpack an mdt_rec_setattr from the REINT record into the thread's
 * md_attr/lu_attr and reint record: seeds the ucred (fsuid/fsgid/cap/
 * suppgid, needed later by old_init_ucred_reint()), translates the valid
 * mask, nodemap-maps the new uid/gid/projid, and copies size/time fields
 * plus the close-intent bias flags. */
1153 static int mdt_setattr_unpack_rec(struct mdt_thread_info *info)
1155 struct lu_ucred *uc = mdt_ucred(info);
1156 struct md_attr *ma = &info->mti_attr;
1157 struct lu_attr *la = &ma->ma_attr;
1158 struct req_capsule *pill = info->mti_pill;
1159 struct mdt_reint_record *rr = &info->mti_rr;
1160 struct mdt_rec_setattr *rec;
1161 struct lu_nodemap *nodemap;
/* All reint record variants must share one wire size. */
1165 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1166 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1170 /* This prior initialization is needed for old_init_ucred_reint() */
1171 uc->uc_fsuid = rec->sa_fsuid;
1172 uc->uc_fsgid = rec->sa_fsgid;
1173 uc->uc_cap = CAP_EMPTY_SET;
1174 uc->uc_cap.cap[0] = rec->sa_cap;
1175 uc->uc_suppgids[0] = rec->sa_suppgid;
1176 uc->uc_suppgids[1] = -1;
1178 rr->rr_fid1 = &rec->sa_fid;
1179 la->la_valid = mdt_attr_valid_xlate(rec->sa_valid, rr, ma);
1180 la->la_mode = rec->sa_mode;
1181 la->la_flags = rec->sa_attr_flags;
1183 nodemap = nodemap_get_from_exp(info->mti_exp);
1184 if (IS_ERR(nodemap))
1185 RETURN(PTR_ERR(nodemap));
/* The target owner IDs are client-space; map them to FS-space. */
1187 la->la_uid = nodemap_map_id(nodemap, NODEMAP_UID,
1188 NODEMAP_CLIENT_TO_FS, rec->sa_uid);
1189 la->la_gid = nodemap_map_id(nodemap, NODEMAP_GID,
1190 NODEMAP_CLIENT_TO_FS, rec->sa_gid);
1191 la->la_projid = nodemap_map_id(nodemap, NODEMAP_PROJID,
1192 NODEMAP_CLIENT_TO_FS, rec->sa_projid);
1193 nodemap_putref(nodemap);
1195 la->la_size = rec->sa_size;
1196 la->la_blocks = rec->sa_blocks;
1197 la->la_ctime = rec->sa_ctime;
1198 la->la_atime = rec->sa_atime;
1199 la->la_mtime = rec->sa_mtime;
1200 ma->ma_valid = MA_INODE;
/* Only the whitelisted bias bits are honoured from the client. */
1202 ma->ma_attr_flags |= rec->sa_bias & (MDS_CLOSE_INTENT |
1203 MDS_DATA_MODIFIED | MDS_TRUNC_KEEP_LEASE |
/* Extract the open handle from the MDT_EPOCH field of a close request
 * (when the client sent one) into info->mti_open_handle. */
1208 static int mdt_close_handle_unpack(struct mdt_thread_info *info)
1210 struct req_capsule *pill = info->mti_pill;
1211 struct mdt_ioepoch *ioepoch;
1214 if (req_capsule_get_size(pill, &RMF_MDT_EPOCH, RCL_CLIENT))
1215 ioepoch = req_capsule_client_get(pill, &RMF_MDT_EPOCH);
1219 if (ioepoch == NULL)
1222 info->mti_open_handle = ioepoch->mio_open_handle;
/* Fetch the optional DLM lock request from the capsule into
 * info->mti_dlm_req, when the client included one. */
1227 static inline int mdt_dlmreq_unpack(struct mdt_thread_info *info) {
1228 struct req_capsule *pill = info->mti_pill;
1230 if (req_capsule_get_size(pill, &RMF_DLM_REQ, RCL_CLIENT)) {
1231 info->mti_dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
1232 if (info->mti_dlm_req == NULL)
/* Unpack a full setattr reint request: the setattr record itself, then
 * any EA payload (classified as LMV or LOV by its magic), then the DLM
 * lock request. */
1239 static int mdt_setattr_unpack(struct mdt_thread_info *info)
1241 struct mdt_reint_record *rr = &info->mti_rr;
1242 struct md_attr *ma = &info->mti_attr;
1243 struct req_capsule *pill = info->mti_pill;
1247 rc = mdt_setattr_unpack_rec(info);
1251 if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
1252 rr->rr_eadata = req_capsule_client_get(pill, &RMF_EADATA);
1253 rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA,
1256 if (rr->rr_eadatalen > 0) {
1257 const struct lmv_user_md *lum;
1259 lum = rr->rr_eadata;
1260 /* Sigh ma_valid(from req) does not indicate whether
1261 * it will set LOV/LMV EA, so we have to check magic */
1262 if (le32_to_cpu(lum->lum_magic) == LMV_USER_MAGIC) {
1263 ma->ma_valid |= MA_LMV;
1264 ma->ma_lmv = (void *)rr->rr_eadata;
1265 ma->ma_lmv_size = rr->rr_eadatalen;
1267 ma->ma_valid |= MA_LOV;
1268 ma->ma_lmm = (void *)rr->rr_eadata;
1269 ma->ma_lmm_size = rr->rr_eadatalen;
1274 rc = mdt_dlmreq_unpack(info);
/*
 * When the close carries an intent (MDS_CLOSE_INTENT in ma_attr_flags),
 * extend the capsule to the close-intent format and check that the
 * intent's CLOSE_DATA buffer is actually present.
 */
1278 static int mdt_close_intent_unpack(struct mdt_thread_info *info)
1280 struct md_attr *ma = &info->mti_attr;
1281 struct req_capsule *pill = info->mti_pill;
/* Plain close: nothing extra to unpack. */
1284 if (!(ma->ma_attr_flags & MDS_CLOSE_INTENT))
1287 req_capsule_extend(pill, &RQF_MDS_CLOSE_INTENT);
1289 if (!(req_capsule_has_field(pill, &RMF_CLOSE_DATA, RCL_CLIENT) &&
1290 req_capsule_field_present(pill, &RMF_CLOSE_DATA, RCL_CLIENT)))
/*
 * Unpack an MDS_CLOSE request: open handle, setattr record (size/times
 * sent at close), optional close intent, then switch to the request's
 * user credentials.
 */
1296 int mdt_close_unpack(struct mdt_thread_info *info)
1301 rc = mdt_close_handle_unpack(info);
1305 rc = mdt_setattr_unpack_rec(info);
1309 rc = mdt_close_intent_unpack(info);
1313 RETURN(mdt_init_ucred_reint(info));
/*
 * Unpack a REINT_CREATE request (mkdir/mknod/symlink): credentials,
 * fids, attributes, name, symlink target or ACL EA, security and
 * encryption contexts, SELinux policy check, and the lock request.
 */
1316 static int mdt_create_unpack(struct mdt_thread_info *info)
1318 struct lu_ucred *uc = mdt_ucred(info);
1319 struct mdt_rec_create *rec;
1320 struct lu_attr *attr = &info->mti_attr.ma_attr;
1321 struct mdt_reint_record *rr = &info->mti_rr;
1322 struct req_capsule *pill = info->mti_pill;
1323 struct md_op_spec *sp = &info->mti_spec;
/* The create record is overlaid on the generic reint record. */
1328 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1329 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1333 /* This prior initialization is needed for old_init_ucred_reint() */
1334 uc->uc_fsuid = rec->cr_fsuid;
1335 uc->uc_fsgid = rec->cr_fsgid;
1336 uc->uc_cap = CAP_EMPTY_SET;
1337 uc->uc_cap.cap[0] = rec->cr_cap;
1338 uc->uc_suppgids[0] = rec->cr_suppgid1;
/* Create only carries one supplementary group; mark the second unused. */
1339 uc->uc_suppgids[1] = -1;
1340 uc->uc_umask = rec->cr_umask;
/* fid1 = parent directory, fid2 = fid chosen for the new object. */
1342 rr->rr_fid1 = &rec->cr_fid1;
1343 rr->rr_fid2 = &rec->cr_fid2;
1344 attr->la_mode = rec->cr_mode;
1345 attr->la_rdev = rec->cr_rdev;
1346 attr->la_uid = rec->cr_fsuid;
1347 attr->la_gid = rec->cr_fsgid;
/* Client supplies one timestamp used for ctime/mtime/atime alike. */
1348 attr->la_ctime = rec->cr_time;
1349 attr->la_mtime = rec->cr_time;
1350 attr->la_atime = rec->cr_time;
1351 attr->la_valid = LA_MODE | LA_RDEV | LA_UID | LA_GID | LA_TYPE |
1352 LA_CTIME | LA_MTIME | LA_ATIME;
1353 memset(&sp->u, 0, sizeof(sp->u));
/* Reassemble the 64-bit create flags from the split wire fields. */
1354 sp->sp_cr_flags = get_mrc_cr_flags(rec);
1356 rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
/* Symlink create: pull the target string from the extended format. */
1360 if (S_ISLNK(attr->la_mode)) {
1361 const char *tgt = NULL;
1364 req_capsule_extend(pill, &RQF_MDS_REINT_CREATE_SYM);
1365 sz = req_capsule_get_size(pill, &RMF_SYMTGT, RCL_CLIENT);
1367 tgt = req_capsule_client_get(pill, &RMF_SYMTGT);
1368 sp->u.sp_symname.ln_name = tgt;
1369 sp->u.sp_symname.ln_namelen = sz - 1; /* skip NUL */
/* Non-symlink path: EADATA may carry a default striping/ACL payload. */
1374 req_capsule_extend(pill, &RQF_MDS_REINT_CREATE_ACL);
1375 if (S_ISDIR(attr->la_mode) &&
1376 req_capsule_get_size(pill, &RMF_EADATA, RCL_CLIENT) > 0) {
1377 sp->u.sp_ea.eadata =
1378 req_capsule_client_get(pill, &RMF_EADATA);
1379 sp->u.sp_ea.eadatalen =
1380 req_capsule_get_size(pill, &RMF_EADATA,
1382 sp->sp_cr_flags |= MDS_OPEN_HAS_EA;
/* Optional file security context (e.g. SELinux label) from the client. */
1386 rc = mdt_file_secctx_unpack(pill, &sp->sp_cr_file_secctx_name,
1387 &sp->sp_cr_file_secctx,
1388 &sp->sp_cr_file_secctx_size);
/* Optional encryption context for encrypted files/dirs. */
1392 rc = mdt_file_encctx_unpack(pill, &sp->sp_cr_file_encctx,
1393 &sp->sp_cr_file_encctx_size);
1397 rc = req_check_sepol(pill);
1401 rc = mdt_dlmreq_unpack(info);
/*
 * Unpack a REINT_LINK request: credentials, source/target fids,
 * timestamps, link name, SELinux check, and the lock request.
 */
1405 static int mdt_link_unpack(struct mdt_thread_info *info)
1407 struct lu_ucred *uc = mdt_ucred(info);
1408 struct mdt_rec_link *rec;
1409 struct lu_attr *attr = &info->mti_attr.ma_attr;
1410 struct mdt_reint_record *rr = &info->mti_rr;
1411 struct req_capsule *pill = info->mti_pill;
1416 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1417 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1421 /* This prior initialization is needed for old_init_ucred_reint() */
1422 uc->uc_fsuid = rec->lk_fsuid;
1423 uc->uc_fsgid = rec->lk_fsgid;
1424 uc->uc_cap = CAP_EMPTY_SET;
1425 uc->uc_cap.cap[0] = rec->lk_cap;
/* Link involves two objects, so both supplementary groups are used. */
1426 uc->uc_suppgids[0] = rec->lk_suppgid1;
1427 uc->uc_suppgids[1] = rec->lk_suppgid2;
1429 attr->la_uid = rec->lk_fsuid;
1430 attr->la_gid = rec->lk_fsgid;
/* fid1 = existing source object, fid2 = target parent directory. */
1431 rr->rr_fid1 = &rec->lk_fid1;
1432 rr->rr_fid2 = &rec->lk_fid2;
1433 attr->la_ctime = rec->lk_time;
1434 attr->la_mtime = rec->lk_time;
1435 attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME;
1437 rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
1441 rc = req_check_sepol(pill);
1445 rc = mdt_dlmreq_unpack(info);
/*
 * Unpack a REINT_UNLINK request: credentials, parent/child fids, mode,
 * fid-op bias, name, replay flag, SELinux check, and the lock request.
 */
1450 static int mdt_unlink_unpack(struct mdt_thread_info *info)
1452 struct lu_ucred *uc = mdt_ucred(info);
1453 struct mdt_rec_unlink *rec;
1454 struct lu_attr *attr = &info->mti_attr.ma_attr;
1455 struct mdt_reint_record *rr = &info->mti_rr;
1456 struct req_capsule *pill = info->mti_pill;
1461 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1462 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1466 /* This prior initialization is needed for old_init_ucred_reint() */
1467 uc->uc_fsuid = rec->ul_fsuid;
1468 uc->uc_fsgid = rec->ul_fsgid;
1469 uc->uc_cap = CAP_EMPTY_SET;
1470 uc->uc_cap.cap[0] = rec->ul_cap;
1471 uc->uc_suppgids[0] = rec->ul_suppgid1;
1472 uc->uc_suppgids[1] = -1;
1474 attr->la_uid = rec->ul_fsuid;
1475 attr->la_gid = rec->ul_fsgid;
/* fid1 = parent directory, fid2 = object being unlinked. */
1476 rr->rr_fid1 = &rec->ul_fid1;
1477 rr->rr_fid2 = &rec->ul_fid2;
1478 attr->la_ctime = rec->ul_time;
1479 attr->la_mtime = rec->ul_time;
1480 attr->la_mode = rec->ul_mode;
1481 attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;
/* MDS_FID_OP: client addresses the victim by FID rather than by name. */
1482 if (rec->ul_bias & MDS_FID_OP)
1483 info->mti_spec.sp_cr_flags |= MDS_OP_WITH_FID;
1485 info->mti_spec.sp_cr_flags &= ~MDS_OP_WITH_FID;
1487 rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
/* On replay, do not re-create state that the original execution made. */
1491 info->mti_spec.no_create = !!req_is_replay(mdt_info_req(info));
1493 rc = req_check_sepol(pill);
1497 rc = mdt_dlmreq_unpack(info);
/*
 * REINT_RMENTRY: remove only the directory entry (not the object).
 * Marks sp_rm_entry and reuses the unlink unpacker for the rest.
 */
1501 static int mdt_rmentry_unpack(struct mdt_thread_info *info)
1503 info->mti_spec.sp_rm_entry = 1;
1504 return mdt_unlink_unpack(info);
/*
 * Unpack a REINT_RENAME request: credentials, source/target parent fids,
 * both names, replay flag, SELinux check, and the lock request.
 */
1507 static int mdt_rename_unpack(struct mdt_thread_info *info)
1509 struct lu_ucred *uc = mdt_ucred(info);
1510 struct mdt_rec_rename *rec;
1511 struct lu_attr *attr = &info->mti_attr.ma_attr;
1512 struct mdt_reint_record *rr = &info->mti_rr;
1513 struct req_capsule *pill = info->mti_pill;
1514 struct md_op_spec *spec = &info->mti_spec;
1519 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1520 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1524 /* This prior initialization is needed for old_init_ucred_reint() */
1525 uc->uc_fsuid = rec->rn_fsuid;
1526 uc->uc_fsgid = rec->rn_fsgid;
1527 uc->uc_cap = CAP_EMPTY_SET;
1528 uc->uc_cap.cap[0] = rec->rn_cap;
/* Rename touches two directories, so both suppgids are meaningful. */
1529 uc->uc_suppgids[0] = rec->rn_suppgid1;
1530 uc->uc_suppgids[1] = rec->rn_suppgid2;
1532 attr->la_uid = rec->rn_fsuid;
1533 attr->la_gid = rec->rn_fsgid;
/* fid1 = source parent, fid2 = target parent. */
1534 rr->rr_fid1 = &rec->rn_fid1;
1535 rr->rr_fid2 = &rec->rn_fid2;
1536 attr->la_ctime = rec->rn_time;
1537 attr->la_mtime = rec->rn_time;
1538 /* rename_tgt contains the mode already */
1539 attr->la_mode = rec->rn_mode;
1540 attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;
/* Source name, then target name (carried in the SYMTGT field). */
1542 rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
1546 rc = mdt_name_unpack(pill, &RMF_SYMTGT, &rr->rr_tgt_name, 0);
1550 spec->no_create = !!req_is_replay(mdt_info_req(info));
1552 rc = req_check_sepol(pill);
1556 rc = mdt_dlmreq_unpack(info);
/*
 * Unpack a REINT_MIGRATE request (cross-MDT migration). Reuses the
 * rename record layout, then handles migrate-specific bits: optional
 * close handle, namespace-only flag, and the mandatory LMV user md
 * (clients older than 2.11 that do not pack one are rejected).
 */
1561 static int mdt_migrate_unpack(struct mdt_thread_info *info)
1563 struct lu_ucred *uc = mdt_ucred(info);
1564 struct mdt_rec_rename *rec;
1565 struct lu_attr *attr = &info->mti_attr.ma_attr;
1566 struct mdt_reint_record *rr = &info->mti_rr;
1567 struct req_capsule *pill = info->mti_pill;
1568 struct md_op_spec *spec = &info->mti_spec;
1573 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1574 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1578 /* This prior initialization is needed for old_init_ucred_reint() */
1579 uc->uc_fsuid = rec->rn_fsuid;
1580 uc->uc_fsgid = rec->rn_fsgid;
1581 uc->uc_cap = CAP_EMPTY_SET;
1582 uc->uc_cap.cap[0] = rec->rn_cap;
1583 uc->uc_suppgids[0] = rec->rn_suppgid1;
1584 uc->uc_suppgids[1] = rec->rn_suppgid2;
1586 attr->la_uid = rec->rn_fsuid;
1587 attr->la_gid = rec->rn_fsgid;
/* fid1 = parent directory, fid2 = object being migrated. */
1588 rr->rr_fid1 = &rec->rn_fid1;
1589 rr->rr_fid2 = &rec->rn_fid2;
1590 attr->la_ctime = rec->rn_time;
1591 attr->la_mtime = rec->rn_time;
1592 /* rename_tgt contains the mode already */
1593 attr->la_mode = rec->rn_mode;
1594 attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;
1595 spec->sp_cr_flags = 0;
1597 rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
/* A migrate combined with close must also unpack the open handle. */
1601 if (rec->rn_bias & MDS_CLOSE_MIGRATE) {
1602 rc = mdt_close_handle_unpack(info);
1606 spec->sp_migrate_close = 1;
1608 spec->sp_migrate_close = 0;
/* Namespace-only migration: move the entry without copying file data. */
1611 spec->sp_migrate_nsonly = !!(rec->rn_bias & MDS_MIGRATE_NSONLY);
1613 /* lustre version > 2.11 migration packs lum */
1614 if (req_capsule_has_field(pill, &RMF_EADATA, RCL_CLIENT)) {
1615 if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
1616 rr->rr_eadatalen = req_capsule_get_size(pill,
1620 if (rr->rr_eadatalen > 0) {
1621 struct lmv_user_md_v1 *lmu;
1623 lmu = req_capsule_client_get(pill, &RMF_EADATA);
/* Pin the hash type so the layout cannot change under migration. */
1624 lmu->lum_hash_type |=
1625 cpu_to_le32(LMV_HASH_FLAG_FIXED);
1626 rr->rr_eadata = lmu;
1627 spec->u.sp_ea.eadatalen = rr->rr_eadatalen;
1628 spec->u.sp_ea.eadata = rr->rr_eadata;
1629 spec->sp_cr_flags |= MDS_OPEN_HAS_EA;
1632 /* old client doesn't provide lum. */
1633 RETURN(-EOPNOTSUPP);
1637 spec->no_create = !!req_is_replay(mdt_info_req(info));
1639 rc = mdt_dlmreq_unpack(info);
1645 * please see comment above LOV_MAGIC_V1_DEFINED
/*
 * On replay, re-assert the DEFINED bit in the LOV magic (handling both
 * native and byte-swapped magics) so a replayed create uses the layout
 * exactly as originally defined.
 */
1647 void mdt_fix_lov_magic(struct mdt_thread_info *info, void *eadata)
1649 struct lov_user_md_v1 *v1 = eadata;
1653 if (unlikely(req_is_replay(mdt_info_req(info)))) {
/* Magic stored in host-matching byte order. */
1654 if ((v1->lmm_magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
1655 v1->lmm_magic |= LOV_MAGIC_DEFINED;
/* Magic stored byte-swapped (peer of opposite endianness). */
1656 else if ((v1->lmm_magic & __swab32(LOV_MAGIC_MAGIC)) ==
1657 __swab32(LOV_MAGIC_MAGIC))
1658 v1->lmm_magic |= __swab32(LOV_MAGIC_DEFINED);
/*
 * Unpack a REINT_OPEN request: credentials, fids, old open handle,
 * attributes, create flags, name (anonymous-fid tolerant), optional
 * layout EA, security/encryption contexts, and SELinux check.
 */
1662 static int mdt_open_unpack(struct mdt_thread_info *info)
1664 struct lu_ucred *uc = mdt_ucred(info);
1665 struct mdt_rec_create *rec;
1666 struct lu_attr *attr = &info->mti_attr.ma_attr;
1667 struct req_capsule *pill = info->mti_pill;
1668 struct mdt_reint_record *rr = &info->mti_rr;
1669 struct ptlrpc_request *req = mdt_info_req(info);
1670 struct md_op_spec *sp = &info->mti_spec;
1674 BUILD_BUG_ON(sizeof(struct mdt_rec_create) !=
1675 sizeof(struct mdt_rec_reint));
1676 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1680 /* This prior initialization is needed for old_init_ucred_reint() */
1681 uc->uc_fsuid = rec->cr_fsuid;
1682 uc->uc_fsgid = rec->cr_fsgid;
1683 uc->uc_cap = CAP_EMPTY_SET;
1684 uc->uc_cap.cap[0] = rec->cr_cap;
1685 uc->uc_suppgids[0] = rec->cr_suppgid1;
1686 uc->uc_suppgids[1] = rec->cr_suppgid2;
1687 uc->uc_umask = rec->cr_umask;
/* fid1 = parent dir, fid2 = target fid; also keep the old open handle
 * for open-by-handle / replay cases. */
1689 rr->rr_fid1 = &rec->cr_fid1;
1690 rr->rr_fid2 = &rec->cr_fid2;
1691 rr->rr_open_handle = &rec->cr_open_handle_old;
1692 attr->la_mode = rec->cr_mode;
1693 attr->la_rdev = rec->cr_rdev;
1694 attr->la_uid = rec->cr_fsuid;
1695 attr->la_gid = rec->cr_fsgid;
1696 attr->la_ctime = rec->cr_time;
1697 attr->la_mtime = rec->cr_time;
1698 attr->la_atime = rec->cr_time;
1699 attr->la_valid = LA_MODE | LA_RDEV | LA_UID | LA_GID |
1700 LA_CTIME | LA_MTIME | LA_ATIME;
1701 memset(&info->mti_spec.u, 0, sizeof(info->mti_spec.u));
1702 info->mti_spec.sp_cr_flags = get_mrc_cr_flags(rec);
1703 /* Do not trigger ASSERTION if client miss to set such flags. */
1704 if (unlikely(info->mti_spec.sp_cr_flags == 0))
1707 info->mti_cross_ref = !!(rec->cr_bias & MDS_CROSS_REF);
/* MNF_FIX_ANON: tolerate the anonymous "/" name used by open-by-fid. */
1709 mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, MNF_FIX_ANON);
1711 if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
1712 rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA,
1715 if (rr->rr_eadatalen > 0) {
1716 rr->rr_eadata = req_capsule_client_get(pill,
1718 sp->u.sp_ea.eadatalen = rr->rr_eadatalen;
1719 sp->u.sp_ea.eadata = rr->rr_eadata;
1720 sp->sp_archive_id = rec->cr_archive_id;
1721 sp->no_create = !!req_is_replay(req);
/* Replay may need the LOV magic's DEFINED bit restored. */
1722 mdt_fix_lov_magic(info, rr->rr_eadata);
1726 * Client default md_size may be 0 right after client start,
1727 * until all osc are connected, set here just some reasonable
1728 * value to prevent misbehavior.
1730 if (rr->rr_eadatalen == 0 &&
1731 !(info->mti_spec.sp_cr_flags & MDS_OPEN_DELAY_CREATE))
1732 rr->rr_eadatalen = MIN_MD_SIZE;
1735 rc = mdt_file_secctx_unpack(pill, &sp->sp_cr_file_secctx_name,
1736 &sp->sp_cr_file_secctx,
1737 &sp->sp_cr_file_secctx_size);
1741 rc = mdt_file_encctx_unpack(pill, &sp->sp_cr_file_encctx,
1742 &sp->sp_cr_file_encctx_size);
1746 rc = req_check_sepol(pill);
/*
 * Unpack a REINT_SETXATTR request: credentials, target fid, xattr name
 * and (optionally) its value, with a server-side size limit check.
 */
1753 static int mdt_setxattr_unpack(struct mdt_thread_info *info)
1755 struct mdt_reint_record *rr = &info->mti_rr;
1756 struct lu_ucred *uc = mdt_ucred(info);
1757 struct lu_attr *attr = &info->mti_attr.ma_attr;
1758 struct req_capsule *pill = info->mti_pill;
1759 struct mdt_rec_setxattr *rec;
1764 BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) !=
1765 sizeof(struct mdt_rec_reint));
1766 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1770 /* This prior initialization is needed for old_init_ucred_reint() */
1771 uc->uc_fsuid = rec->sx_fsuid;
1772 uc->uc_fsgid = rec->sx_fsgid;
1773 uc->uc_cap = CAP_EMPTY_SET;
1774 uc->uc_cap.cap[0] = rec->sx_cap;
1775 uc->uc_suppgids[0] = rec->sx_suppgid1;
1776 uc->uc_suppgids[1] = -1;
1778 rr->rr_opcode = rec->sx_opcode;
1779 rr->rr_fid1 = &rec->sx_fid;
1780 attr->la_valid = rec->sx_valid;
1781 attr->la_ctime = rec->sx_time;
1782 attr->la_size = rec->sx_size;
1783 attr->la_flags = rec->sx_flags;
/* The xattr name travels in the NAME field. */
1785 rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
1789 if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
1790 rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA,
/* Reject values larger than this MDT allows for EAs. */
1793 if (rr->rr_eadatalen > info->mti_mdt->mdt_max_ea_size)
1796 if (rr->rr_eadatalen > 0) {
1797 rr->rr_eadata = req_capsule_client_get(pill,
1799 if (rr->rr_eadata == NULL)
1802 rr->rr_eadata = NULL;
/* Missing value is only acceptable for an xattr removal. */
1804 } else if (!(attr->la_valid & OBD_MD_FLXATTRRM)) {
1805 CDEBUG(D_INFO, "no xattr data supplied\n");
1809 rc = req_check_sepol(pill);
1813 if (mdt_dlmreq_unpack(info) < 0)
/*
 * Unpack a REINT_RESYNC (mirror resync) request: credentials, target
 * fid, mirror id, and the lease handle (with a swab correction).
 */
1819 static int mdt_resync_unpack(struct mdt_thread_info *info)
1821 struct req_capsule *pill = info->mti_pill;
1822 struct mdt_reint_record *rr = &info->mti_rr;
1823 struct lu_ucred *uc = mdt_ucred(info);
1824 struct mdt_rec_resync *rec;
1827 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1828 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1832 /* This prior initialization is needed for old_init_ucred_reint() */
1833 uc->uc_fsuid = rec->rs_fsuid;
1834 uc->uc_fsgid = rec->rs_fsgid;
1835 uc->uc_cap = CAP_EMPTY_SET;
1836 uc->uc_cap.cap[0] = rec->rs_cap;
1838 rr->rr_fid1 = &rec->rs_fid;
1839 rr->rr_mirror_id = rec->rs_mirror_id;
1841 /* cookie doesn't need to be swapped but it has been swapped
1842 * in lustre_swab_mdt_rec_reint() as rr_mtime, so here it needs
1844 if (req_capsule_req_need_swab(pill))
1845 __swab64s(&rec->rs_lease_handle.cookie);
1846 rr->rr_lease_handle = &rec->rs_lease_handle;
1848 RETURN(mdt_dlmreq_unpack(info));
/* Per-opcode dispatch table mapping each REINT operation to its unpacker. */
1851 typedef int (*reint_unpacker)(struct mdt_thread_info *info);
1853 static reint_unpacker mdt_reint_unpackers[REINT_MAX] = {
1854 [REINT_SETATTR] = mdt_setattr_unpack,
1855 [REINT_CREATE] = mdt_create_unpack,
1856 [REINT_LINK] = mdt_link_unpack,
1857 [REINT_UNLINK] = mdt_unlink_unpack,
1858 [REINT_RENAME] = mdt_rename_unpack,
1859 [REINT_OPEN] = mdt_open_unpack,
1860 [REINT_SETXATTR] = mdt_setxattr_unpack,
1861 [REINT_RMENTRY] = mdt_rmentry_unpack,
1862 [REINT_MIGRATE] = mdt_migrate_unpack,
1863 [REINT_RESYNC] = mdt_resync_unpack,
/*
 * Dispatch the reint unpacker matching opcode @op; resets the reint
 * record first and rejects out-of-range or unregistered opcodes.
 */
1866 int mdt_reint_unpack(struct mdt_thread_info *info, __u32 op)
1871 memset(&info->mti_rr, 0, sizeof(info->mti_rr));
1872 if (op < REINT_MAX && mdt_reint_unpackers[op] != NULL) {
1873 info->mti_rr.rr_opcode = op;
1874 rc = mdt_reint_unpackers[op](info);
1876 CERROR("Unexpected opcode %d\n", op);
/*
 * Fill the reply's FILE_SECCTX buffer with @child's security context
 * xattr (named by the client) and shrink the buffer to the actual size.
 * NOTE(review): the tail of this function is not visible in this listing.
 */
1882 int mdt_pack_secctx_in_reply(struct mdt_thread_info *info,
1883 struct mdt_object *child)
1886 struct lu_buf *buffer;
1887 struct mdt_body *repbody;
1888 struct req_capsule *pill = info->mti_pill;
/* Only act when the reply actually reserved room for a secctx. */
1891 if (req_capsule_has_field(pill, &RMF_FILE_SECCTX, RCL_SERVER) &&
1892 req_capsule_get_size(pill, &RMF_FILE_SECCTX, RCL_SERVER) != 0) {
1894 req_capsule_client_get(pill, &RMF_FILE_SECCTX_NAME);
1895 buffer = &info->mti_buf;
1897 /* fill reply buffer with security context now */
1898 buffer->lb_len = req_capsule_get_size(pill, &RMF_FILE_SECCTX,
1900 buffer->lb_buf = req_capsule_server_get(info->mti_pill,
/* Read the secctx xattr straight into the reply buffer. */
1902 rc = mo_xattr_get(info->mti_env, mdt_object_child(child),
1903 buffer, secctx_name);
1906 "found security context of size %d for "DFID"\n",
1907 rc, PFID(mdt_object_fid(child)));
1909 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1910 repbody->mbo_valid |= OBD_MD_SECCTX;
/* Trim the reply field down to the bytes actually returned. */
1911 if (rc < buffer->lb_len)
1912 req_capsule_shrink(pill, &RMF_FILE_SECCTX, rc,
1917 "security context not found for "DFID": rc = %d\n",
1918 PFID(mdt_object_fid(child)), rc);
1919 req_capsule_shrink(pill, &RMF_FILE_SECCTX, 0,
1921 /* handling -ENOENT is important because it may change
1922 * object state in DNE env dropping LOHA_EXISTS flag,
1923 * it is important to return that to the caller.
1924 * Check LU-13115 for details.
1933 /* check whether two FIDs belong to different MDT. */
1934 static int mdt_fids_different_target(struct mdt_thread_info *info,
1935 const struct lu_fid *fid1,
1936 const struct lu_fid *fid2)
1938 const struct lu_env *env = info->mti_env;
1939 struct mdt_device *mdt = info->mti_mdt;
1940 struct lu_seq_range *range = &info->mti_range;
1941 struct seq_server_site *ss;
1942 __u32 index1, index2;
/* FIDs from the same sequence are by construction on the same MDT. */
1945 if (fid_seq(fid1) == fid_seq(fid2))
1948 ss = mdt->mdt_lu_dev.ld_site->ld_seq_site;
/* Resolve each FID's sequence to its owning MDT index via FLD. */
1950 range->lsr_flags = LU_SEQ_RANGE_MDT;
1951 rc = fld_server_lookup(env, ss->ss_server_fld, fid1->f_seq, range);
1955 index1 = range->lsr_index;
1957 rc = fld_server_lookup(env, ss->ss_server_fld, fid2->f_seq, range);
1961 index2 = range->lsr_index;
/* Different MDT indices => the objects live on different targets. */
1963 return index1 != index2;
1967 * Check whether \a child is remote object on \a parent.
1969 * \param[in] info thread environment
1970 * \param[in] parent parent object, it's the same as child object in
1972 * \param[in] child child object
1974 * \retval 1 is remote object.
1975 * \retval 0 isn't remote object.
1976 * \retval < 1 error code
1978 int mdt_is_remote_object(struct mdt_thread_info *info,
1979 struct mdt_object *parent,
1980 struct mdt_object *child)
1982 struct lu_buf *buf = &info->mti_big_buf;
1983 struct linkea_data ldata = { NULL };
1984 struct link_ea_header *leh;
1985 struct link_ea_entry *lee;
1986 struct lu_name name;
/* The filesystem root is never a remote object. */
1994 if (fid_is_root(mdt_object_fid(child)))
1997 if (likely(parent != child)) {
/* One side local and the other remote => remote relationship. */
1998 if (mdt_object_remote(parent) ^ mdt_object_remote(child))
2001 if (!mdt_object_remote(parent) && !mdt_object_remote(child))
/* Both remote: compare their owning MDTs via FLD lookup. */
2004 rc = mdt_fids_different_target(info, mdt_object_fid(parent),
2005 mdt_object_fid(child));
2009 /* client < 2.13.52 getattr_by_fid parent and child are the same */
2010 buf = lu_buf_check_and_alloc(buf, PATH_MAX)
2015 rc = mdt_links_read(info, child, &ldata);
2016 /* can't read linkea, just assume it's remote object */
2017 if (rc == -ENOENT || rc == -ENODATA)
/* Walk every linkEA parent; any parent on another MDT makes it remote. */
2023 lee = (struct link_ea_entry *)(leh + 1);
2024 for (i = 0; i < leh->leh_reccount; i++) {
2025 linkea_entry_unpack(lee, &reclen, &name, &pfid);
2026 lee = (struct link_ea_entry *) ((char *)lee + reclen);
2027 if (mdt_fids_different_target(info, &pfid,
2028 mdt_object_fid(child)))
2035 int mdt_pack_encctx_in_reply(struct mdt_thread_info *info,
2036 struct mdt_object *child)
2038 struct lu_buf *buffer;
2039 struct mdt_body *repbody;
2040 struct req_capsule *pill = info->mti_pill;
2041 struct obd_export *exp = mdt_info_req(info)->rq_export;
2044 if (!exp_connect_encrypt(exp))
2047 if (req_capsule_has_field(pill, &RMF_FILE_ENCCTX, RCL_SERVER) &&
2048 req_capsule_get_size(pill, &RMF_FILE_ENCCTX, RCL_SERVER) != 0) {
2049 struct lu_attr la = { 0 };
2050 struct dt_object *dt = mdt_obj2dt(child);
2052 if (dt && dt->do_ops && dt->do_ops->do_attr_get)
2053 dt_attr_get(info->mti_env, mdt_obj2dt(child), &la);
2055 if (la.la_valid & LA_FLAGS && la.la_flags & LUSTRE_ENCRYPT_FL) {
2056 buffer = &info->mti_buf;
2058 /* fill reply buffer with encryption context now */
2060 req_capsule_get_size(pill, &RMF_FILE_ENCCTX,
2063 req_capsule_server_get(pill, &RMF_FILE_ENCCTX);
2064 rc = mo_xattr_get(info->mti_env,
2065 mdt_object_child(child),
2067 LL_XATTR_NAME_ENCRYPTION_CONTEXT);
2068 if (unlikely(rc == -ENODATA))
2069 /* For compatibility with 2.14 */
2070 rc = mo_xattr_get(info->mti_env,
2071 mdt_object_child(child),
2073 LL_XATTR_NAME_ENCRYPTION_CONTEXT_OLD);
2076 "found encryption ctx of size %d for "DFID"\n",
2077 rc, PFID(mdt_object_fid(child)));
2079 repbody = req_capsule_server_get(pill,
2081 repbody->mbo_valid |= OBD_MD_ENCCTX;
2082 if (rc < buffer->lb_len)
2083 req_capsule_shrink(pill,
2084 &RMF_FILE_ENCCTX, rc,
2089 "encryption ctx not found for "DFID": rc = %d\n",
2090 PFID(mdt_object_fid(child)), rc);
2091 req_capsule_shrink(pill, &RMF_FILE_ENCCTX, 0,
2093 /* handling -ENOENT is important because it may
2094 * change object state in DNE env dropping
2095 * LOHA_EXISTS flag, it is important to return
2096 * that to the caller.
2097 * Check LU-13115 for details.
2103 req_capsule_shrink(pill, &RMF_FILE_ENCCTX, 0,