4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/mdt/mdt_lib.c
33 * Lustre Metadata Target (mdt) request unpacking helper.
35 * Author: Peter Braam <braam@clusterfs.com>
36 * Author: Andreas Dilger <adilger@clusterfs.com>
37 * Author: Phil Schwan <phil@clusterfs.com>
38 * Author: Mike Shaver <shaver@clusterfs.com>
39 * Author: Nikita Danilov <nikita@clusterfs.com>
40 * Author: Huang Hua <huanghua@clusterfs.com>
41 * Author: Fan Yong <fanyong@clusterfs.com>
44 #define DEBUG_SUBSYSTEM S_MDS
46 #include <linux/user_namespace.h>
47 #include <linux/uidgid.h>
49 #include "mdt_internal.h"
50 #include <uapi/linux/lnet/nidstr.h>
51 #include <lustre_nodemap.h>
53 typedef enum ucred_init_type {
/* Combine the two 32-bit halves of a create record's flags (cr_flags_l,
 * cr_flags_h) into a single 64-bit value.
 * NOTE(review): this extraction is missing lines (braces etc.); verify
 * against the full source. */
59 static __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
61 return (__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32);
/* Tear down per-request user credentials on the thread context: reset
 * supplementary GIDs, release the group_info and identity cache
 * references, and mark the ucred as back in its initial (UCRED_INIT)
 * state so it can be reused.
 * NOTE(review): interior lines are missing from this extraction
 * (braces, a likely uc_ginfo NULL check/reset); confirm in full source. */
64 void mdt_exit_ucred(struct mdt_thread_info *info)
66 struct lu_ucred *uc = mdt_ucred(info);
67 struct mdt_device *mdt = info->mti_mdt;
70 if (uc->uc_valid != UCRED_INIT) {
71 uc->uc_suppgids[0] = uc->uc_suppgids[1] = -1;
73 put_group_info(uc->uc_ginfo);
76 if (uc->uc_identity) {
77 mdt_identity_put(mdt->mdt_identity_cache,
79 uc->uc_identity = NULL;
81 uc->uc_valid = UCRED_INIT;
/* Check whether @peernid appears in @nidlist (the nosquash NID list),
 * under @rsi_lock. Returns the cfs_match_nid() result.
 * NOTE(review): the matching spin_lock() call is on a line missing from
 * this extraction; only the unlock is visible here. */
85 static int match_nosquash_list(struct spinlock *rsi_lock,
86 struct list_head *nidlist,
87 struct lnet_nid *peernid)
92 rc = cfs_match_nid(peernid, nidlist);
93 spin_unlock(rsi_lock);
97 /* root_squash for inter-MDS operations */
/* Apply the MDT root-squash policy to the current request's ucred:
 * if squashing is configured (rsi_uid set), the request is from root
 * (uc_fsuid == 0), and the peer NID is not on the nosquash list, remap
 * fsuid/fsgid to the squash IDs, clear capabilities and supplementary
 * groups.
 * NOTE(review): early-return lines and braces are missing from this
 * extraction; verify control flow against the full source. */
98 static int mdt_root_squash(struct mdt_thread_info *info,
99 struct lnet_nid *peernid)
101 struct lu_ucred *ucred = mdt_ucred(info);
102 struct root_squash_info *squash = &info->mti_mdt->mdt_squash;
105 LASSERT(ucred != NULL);
/* nothing to do when no squash uid configured or requester not root */
106 if (!squash->rsi_uid || ucred->uc_fsuid)
109 if (match_nosquash_list(&squash->rsi_lock,
110 &squash->rsi_nosquash_nids,
112 CDEBUG(D_OTHER, "%s is in nosquash_nids list\n",
113 libcfs_nidstr(peernid));
117 CDEBUG(D_OTHER, "squash req from %s, (%d:%d/%x)=>(%d:%d/%x)\n",
118 libcfs_nidstr(peernid), ucred->uc_fsuid, ucred->uc_fsgid,
119 (u32)ll_capability_u32(ucred->uc_cap),
120 squash->rsi_uid, squash->rsi_gid, 0);
/* perform the actual squash: remap IDs, drop caps and suppgids */
122 ucred->uc_fsuid = squash->rsi_uid;
123 ucred->uc_fsgid = squash->rsi_gid;
124 ucred->uc_cap = CAP_EMPTY_SET;
125 ucred->uc_suppgids[0] = -1;
126 ucred->uc_suppgids[1] = -1;
/* Copy the request's jobid (if any) into the ucred, otherwise leave an
 * empty jobid string.
 * NOTE(review): the "if (jobid)"/"else" lines are missing from this
 * extraction; the two assignments below are presumably the two branches. */
131 static void ucred_set_jobid(struct mdt_thread_info *info, struct lu_ucred *uc)
133 struct ptlrpc_request *req = mdt_info_req(info);
134 const char *jobid = mdt_req_get_jobid(req);
136 /* set jobid if specified. */
138 strlcpy(uc->uc_jobid, jobid, sizeof(uc->uc_jobid));
140 uc->uc_jobid[0] = '\0';
/* Record the peer NID in the ucred when an export connection exists;
 * fall back to LNET_NID_ANY otherwise (the "else" line is missing from
 * this extraction). */
143 static void ucred_set_nid(struct mdt_thread_info *info, struct lu_ucred *uc)
145 if (info && info->mti_exp && info->mti_exp->exp_connection)
146 uc->uc_nid = lnet_nid_to_nid4(
147 &info->mti_exp->exp_connection->c_peer.nid);
149 uc->uc_nid = LNET_NID_ANY;
/* Fetch the audit-enable flag from the export's nodemap (when
 * available) and store it in uc->uc_enable_audit.
 * NOTE(review): the declaration of the local "audit" default value is
 * on a line missing from this extraction. */
152 static void ucred_set_audit_enabled(struct mdt_thread_info *info,
155 struct lu_nodemap *nodemap = NULL;
158 if (info && info->mti_exp) {
159 nodemap = nodemap_get_from_exp(info->mti_exp);
160 if (nodemap && !IS_ERR(nodemap)) {
161 audit = nodemap->nmf_enable_audit;
162 nodemap_putref(nodemap);
166 uc->uc_enable_audit = audit;
/* Read the RBAC role mask from the export's nodemap (defaulting to
 * NODEMAP_RBAC_ALL) and expand it into the individual per-role boolean
 * fields of the ucred. */
169 static void ucred_set_rbac_roles(struct mdt_thread_info *info,
172 struct lu_nodemap *nodemap = NULL;
173 enum nodemap_rbac_roles rbac = NODEMAP_RBAC_ALL;
175 if (info && info->mti_exp) {
176 nodemap = nodemap_get_from_exp(info->mti_exp);
177 if (!IS_ERR_OR_NULL(nodemap)) {
178 rbac = nodemap->nmf_rbac;
179 nodemap_putref(nodemap);
/* expand the role bitmask into individual ucred flags */
183 uc->uc_rbac_file_perms = !!(rbac & NODEMAP_RBAC_FILE_PERMS);
184 uc->uc_rbac_dne_ops = !!(rbac & NODEMAP_RBAC_DNE_OPS);
185 uc->uc_rbac_quota_ops = !!(rbac & NODEMAP_RBAC_QUOTA_OPS);
186 uc->uc_rbac_byfid_ops = !!(rbac & NODEMAP_RBAC_BYFID_OPS);
187 uc->uc_rbac_chlg_ops = !!(rbac & NODEMAP_RBAC_CHLG_OPS);
188 uc->uc_rbac_fscrypt_admin = !!(rbac & NODEMAP_RBAC_FSCRYPT_ADMIN);
/* Build a fresh ucred for a GSS-authenticated client request from the
 * ptlrpc user descriptor: map all IDs through the nodemap, validate the
 * claimed uid against the authenticated uid, consult the identity cache
 * for setuid/setgid/setgroups permissions, install supplementary
 * groups, apply squashing and the MDT capability mask, and finally mark
 * the ucred UCRED_NEW and fill in jobid/NID/audit/RBAC state.
 * NOTE(review): many lines (braces, RETURN/GOTO paths, several
 * intermediate statements) are missing from this extraction; all
 * control-flow claims below should be verified against the full source. */
191 static int new_init_ucred(struct mdt_thread_info *info, ucred_init_type_t type,
194 struct ptlrpc_request *req = mdt_info_req(info);
195 struct mdt_device *mdt = info->mti_mdt;
196 struct ptlrpc_user_desc *pud = req->rq_user_desc;
197 struct lu_ucred *ucred = mdt_ucred(info);
198 struct lu_nodemap *nodemap;
199 struct lnet_nid peernid = req->rq_peer.nid;
203 bool is_nm_gid_squashed = false;
/* this path is only for GSS-authenticated, non-MDS-to-MDS requests */
208 LASSERT(req->rq_auth_gss);
209 LASSERT(!req->rq_auth_usr_mdt);
210 LASSERT(req->rq_user_desc);
211 LASSERT(ucred != NULL);
213 ucred->uc_valid = UCRED_INVALID;
215 nodemap = nodemap_get_from_exp(info->mti_exp);
217 RETURN(PTR_ERR(nodemap));
/* map all client-supplied IDs into filesystem IDs via the nodemap */
219 pud->pud_uid = nodemap_map_id(nodemap, NODEMAP_UID,
220 NODEMAP_CLIENT_TO_FS, pud->pud_uid);
221 pud->pud_gid = nodemap_map_id(nodemap, NODEMAP_GID,
222 NODEMAP_CLIENT_TO_FS, pud->pud_gid);
223 pud->pud_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
224 NODEMAP_CLIENT_TO_FS, pud->pud_fsuid);
225 pud->pud_fsgid = nodemap_map_id(nodemap, NODEMAP_GID,
226 NODEMAP_CLIENT_TO_FS, pud->pud_fsgid);
/* remember the original (post-mapping) IDs before any squashing */
228 ucred->uc_o_uid = pud->pud_uid;
229 ucred->uc_o_gid = pud->pud_gid;
230 ucred->uc_o_fsuid = pud->pud_fsuid;
231 ucred->uc_o_fsgid = pud->pud_fsgid;
233 if (nodemap && ucred->uc_o_uid == nodemap->nm_squash_uid) {
234 /* deny access before we get identity ref */
235 if (nodemap->nmf_deny_unknown) {
236 nodemap_putref(nodemap);
240 ucred->uc_suppgids[0] = -1;
241 ucred->uc_suppgids[1] = -1;
244 if (nodemap && ucred->uc_o_gid == nodemap->nm_squash_gid)
245 is_nm_gid_squashed = true;
247 nodemap_putref(nodemap);
/* BODY_INIT requests carry a suppgid in the mdt_body */
249 if (type == BODY_INIT) {
250 struct mdt_body *body = (struct mdt_body *)buf;
252 ucred->uc_suppgids[0] = body->mbo_suppgid;
253 ucred->uc_suppgids[1] = -1;
/* with strong auth the claimed uid must match the authenticated uid */
256 if (!flvr_is_rootonly(req->rq_flvr.sf_rpc) &&
257 req->rq_auth_uid != pud->pud_uid) {
258 CDEBUG(D_SEC, "local client %s: auth uid %u "
259 "while client claims %u:%u/%u:%u\n",
260 libcfs_nidstr(&peernid), req->rq_auth_uid,
261 pud->pud_uid, pud->pud_gid,
262 pud->pud_fsuid, pud->pud_fsgid);
/* no identity upcall: grant all setid permissions */
266 if (is_identity_get_disabled(mdt->mdt_identity_cache)) {
267 ucred->uc_identity = NULL;
268 perm = CFS_SETUID_PERM | CFS_SETGID_PERM | CFS_SETGRP_PERM;
270 struct md_identity *identity;
272 identity = mdt_identity_get(mdt->mdt_identity_cache,
274 if (IS_ERR(identity)) {
275 if (unlikely(PTR_ERR(identity) == -EREMCHG)) {
276 ucred->uc_identity = NULL;
277 perm = CFS_SETUID_PERM | CFS_SETGID_PERM |
281 "Deny access without identity: uid %u\n",
286 ucred->uc_identity = identity;
287 perm = mdt_identity_get_perm(ucred->uc_identity,
292 /* find out the setuid/setgid attempt */
293 setuid = (pud->pud_uid != pud->pud_fsuid);
294 setgid = ((pud->pud_gid != pud->pud_fsgid) ||
295 (ucred->uc_identity &&
296 (pud->pud_gid != ucred->uc_identity->mi_gid)));
298 /* check permission of setuid */
299 if (setuid && !(perm & CFS_SETUID_PERM)) {
300 CDEBUG(D_SEC, "mdt blocked setuid attempt (%u -> %u) from %s\n",
301 pud->pud_uid, pud->pud_fsuid, libcfs_nidstr(&peernid));
302 GOTO(out, rc = -EACCES);
305 /* check permission of setgid */
306 if (setgid && !(perm & CFS_SETGID_PERM)) {
307 CDEBUG(D_SEC, "mdt blocked setgid attempt (%u:%u/%u:%u -> %u) "
308 "from %s\n", pud->pud_uid, pud->pud_gid,
309 pud->pud_fsuid, pud->pud_fsgid,
310 ucred->uc_identity->mi_gid, libcfs_nidstr(&peernid));
311 GOTO(out, rc = -EACCES);
314 if (perm & CFS_SETGRP_PERM) {
315 /* only set groups if GID is not squashed */
316 if (pud->pud_ngroups && !is_nm_gid_squashed) {
317 /* setgroups for local client */
318 ucred->uc_ginfo = groups_alloc(pud->pud_ngroups);
319 if (!ucred->uc_ginfo) {
320 CERROR("failed to alloc %d groups\n",
322 GOTO(out, rc = -ENOMEM);
325 lustre_groups_from_list(ucred->uc_ginfo,
327 lustre_groups_sort(ucred->uc_ginfo);
329 ucred->uc_suppgids[0] = -1;
330 ucred->uc_suppgids[1] = -1;
331 ucred->uc_ginfo = NULL;
334 ucred->uc_suppgids[0] = -1;
335 ucred->uc_suppgids[1] = -1;
336 ucred->uc_ginfo = NULL;
339 ucred->uc_uid = pud->pud_uid;
340 ucred->uc_gid = pud->pud_gid;
/* only accept client capabilities when the uid is not squashed */
342 ucred->uc_cap = CAP_EMPTY_SET;
343 if (!nodemap || ucred->uc_o_uid != nodemap->nm_squash_uid)
344 ll_set_capability_u32(&ucred->uc_cap, pud->pud_cap);
346 ucred->uc_fsuid = pud->pud_fsuid;
347 ucred->uc_fsgid = pud->pud_fsgid;
349 /* process root_squash here. */
350 mdt_root_squash(info, &peernid);
/* for non-root, restrict capabilities to the MDT-enabled mask */
352 if (ucred->uc_fsuid) {
353 if (!cap_issubset(ucred->uc_cap, mdt->mdt_enable_cap_mask))
354 CDEBUG(D_SEC, "%s: drop capabilities %llx for NID %s\n",
356 #ifdef CAP_FOR_EACH_U32
357 ucred->uc_cap.cap[0] |
358 ((u64)ucred->uc_cap.cap[1] << 32),
362 libcfs_nidstr(&mdt_info_req(info)->rq_peer.nid));
363 ucred->uc_cap = cap_intersect(ucred->uc_cap,
364 mdt->mdt_enable_cap_mask);
367 ucred->uc_valid = UCRED_NEW;
368 ucred_set_jobid(info, ucred);
369 ucred_set_nid(info, ucred);
370 ucred_set_audit_enabled(info, ucred);
371 ucred_set_rbac_roles(info, ucred);
/* error path: release any group_info / identity refs taken above */
377 if (ucred->uc_ginfo) {
378 put_group_info(ucred->uc_ginfo);
379 ucred->uc_ginfo = NULL;
381 if (ucred->uc_identity) {
382 mdt_identity_put(mdt->mdt_identity_cache,
384 ucred->uc_identity = NULL;
392 * Check whether allow the client to set supplementary group IDs or not.
394 * \param[in] info pointer to the thread context
395 * \param[in] uc pointer to the RPC user descriptor
397 * \retval true if allow to set supplementary group IDs
398 * \retval false for other cases
/* Decide whether the client may change supplementary group IDs:
 * allowed when the identity upcall is disabled; denied when no identity
 * was obtained; otherwise governed by CFS_SETGRP_PERM from the identity
 * permission set for this peer NID.
 * NOTE(review): the explicit return statements are on lines missing
 * from this extraction. */
400 bool allow_client_chgrp(struct mdt_thread_info *info, struct lu_ucred *uc)
404 /* 1. If identity_upcall is disabled,
405 * permit local client to do anything. */
406 if (is_identity_get_disabled(info->mti_mdt->mdt_identity_cache))
409 /* 2. If fail to get related identities, then forbid any client to
410 * set supplementary group IDs. */
411 if (uc->uc_identity == NULL)
414 /* 3. Check the permission in the identities. */
415 perm = mdt_identity_get_perm(
417 &mdt_info_req(info)->rq_peer.nid);
418 if (perm & CFS_SETGRP_PERM)
/* Sanity-check the user credentials of a GSS request without building a
 * new ucred: verify the claimed uid matches the authenticated uid, then
 * use the identity cache to reject unauthorized setuid/setgid attempts.
 * A ucred already in UCRED_OLD/UCRED_NEW state is accepted as-is.
 * NOTE(review): many lines (RETURN paths, braces, GOTO labels) are
 * missing from this extraction; verify the control flow in full source. */
424 int mdt_check_ucred(struct mdt_thread_info *info)
426 struct ptlrpc_request *req = mdt_info_req(info);
427 struct mdt_device *mdt = info->mti_mdt;
428 struct ptlrpc_user_desc *pud = req->rq_user_desc;
429 struct lu_ucred *ucred = mdt_ucred(info);
430 struct md_identity *identity = NULL;
431 struct lnet_nid peernid = req->rq_peer.nid;
439 LASSERT(ucred != NULL);
/* already-initialized credentials need no re-check */
440 if ((ucred->uc_valid == UCRED_OLD) || (ucred->uc_valid == UCRED_NEW))
443 if (!req->rq_auth_gss || req->rq_auth_usr_mdt || !req->rq_user_desc)
446 /* sanity check: if we use strong authentication, we expect the
447 * uid which client claimed is true */
448 if (!flvr_is_rootonly(req->rq_flvr.sf_rpc) &&
449 req->rq_auth_uid != pud->pud_uid) {
450 CDEBUG(D_SEC, "local client %s: auth uid %u "
451 "while client claims %u:%u/%u:%u\n",
452 libcfs_nidstr(&peernid), req->rq_auth_uid,
453 pud->pud_uid, pud->pud_gid,
454 pud->pud_fsuid, pud->pud_fsgid);
458 if (is_identity_get_disabled(mdt->mdt_identity_cache))
461 identity = mdt_identity_get(mdt->mdt_identity_cache, pud->pud_uid);
462 if (IS_ERR(identity)) {
463 if (unlikely(PTR_ERR(identity) == -EREMCHG)) {
466 CDEBUG(D_SEC, "Deny access without identity: uid %u\n",
472 perm = mdt_identity_get_perm(identity, &peernid);
473 /* find out the setuid/setgid attempt */
474 setuid = (pud->pud_uid != pud->pud_fsuid);
475 setgid = (pud->pud_gid != pud->pud_fsgid ||
476 pud->pud_gid != identity->mi_gid);
478 /* check permission of setuid */
479 if (setuid && !(perm & CFS_SETUID_PERM)) {
480 CDEBUG(D_SEC, "mdt blocked setuid attempt (%u -> %u) from %s\n",
481 pud->pud_uid, pud->pud_fsuid, libcfs_nidstr(&peernid));
482 GOTO(out, rc = -EACCES);
485 /* check permission of setgid */
486 if (setgid && !(perm & CFS_SETGID_PERM)) {
488 "mdt blocked setgid attempt (%u:%u/%u:%u -> %u) from %s\n",
489 pud->pud_uid, pud->pud_gid,
490 pud->pud_fsuid, pud->pud_fsgid, identity->mi_gid,
491 libcfs_nidstr(&peernid));
492 GOTO(out, rc = -EACCES);
/* drop the identity reference taken above */
498 mdt_identity_put(mdt->mdt_identity_cache, identity);
/* Shared tail of the "old" (non-GSS) ucred initialization: apply squash
 * handling for the nodemap squash uid, fetch the identity (unless the
 * upcall is disabled), run root_squash, restrict capabilities to the
 * MDT-enabled mask, and mark the ucred UCRED_OLD with jobid/NID/audit/
 * RBAC state filled in.
 * NOTE(review): many lines (braces, RETURN paths) are missing from this
 * extraction; verify control flow against the full source. */
502 static int old_init_ucred_common(struct mdt_thread_info *info,
503 struct lu_nodemap *nodemap)
505 struct lu_ucred *uc = mdt_ucred(info);
506 struct mdt_device *mdt = info->mti_mdt;
507 struct md_identity *identity = NULL;
509 if (nodemap && uc->uc_o_uid == nodemap->nm_squash_uid) {
510 /* deny access before we get identity ref */
511 if (nodemap->nmf_deny_unknown)
/* squashed uid: no capabilities, no supplementary groups */
514 uc->uc_cap = CAP_EMPTY_SET;
515 uc->uc_suppgids[0] = -1;
516 uc->uc_suppgids[1] = -1;
519 if (!is_identity_get_disabled(mdt->mdt_identity_cache)) {
520 identity = mdt_identity_get(mdt->mdt_identity_cache,
522 if (IS_ERR(identity)) {
523 if (unlikely(PTR_ERR(identity) == -EREMCHG ||
524 cap_raised(uc->uc_cap,
525 CAP_DAC_READ_SEARCH))) {
528 CDEBUG(D_SEC, "Deny access without identity: "
529 "uid %u\n", uc->uc_fsuid);
534 uc->uc_identity = identity;
536 /* process root_squash here. */
537 mdt_root_squash(info,
538 &mdt_info_req(info)->rq_peer.nid);
/* restrict capabilities to what this MDT allows */
541 if (!cap_issubset(uc->uc_cap, mdt->mdt_enable_cap_mask))
542 CDEBUG(D_SEC, "%s: drop capabilities %llx for NID %s\n",
544 #ifdef CAP_FOR_EACH_U32
545 uc->uc_cap.cap[0] | ((u64)uc->uc_cap.cap[1]<<32),
549 libcfs_nidstr(&mdt_info_req(info)->rq_peer.nid));
550 uc->uc_cap = cap_intersect(uc->uc_cap,mdt->mdt_enable_cap_mask);
552 uc->uc_valid = UCRED_OLD;
553 ucred_set_jobid(info, uc);
554 ucred_set_nid(info, uc);
555 ucred_set_audit_enabled(info, uc);
556 ucred_set_rbac_roles(info, uc);
/* Initialize the ucred for a non-GSS request from the IDs carried in
 * the mdt_body: map uid/gid/fsuid/fsgid through the nodemap, seed the
 * ucred fields and capability set, then finish via
 * old_init_ucred_common().
 * NOTE(review): lines are missing from this extraction (braces, the
 * IS_ERR(nodemap) check before the RETURN on its line 573). */
563 static int old_init_ucred(struct mdt_thread_info *info,
564 struct mdt_body *body)
566 struct lu_ucred *uc = mdt_ucred(info);
567 struct lu_nodemap *nodemap;
571 nodemap = nodemap_get_from_exp(info->mti_exp);
573 RETURN(PTR_ERR(nodemap));
/* map all client-supplied body IDs into filesystem IDs */
575 body->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
576 NODEMAP_CLIENT_TO_FS, body->mbo_uid);
577 body->mbo_gid = nodemap_map_id(nodemap, NODEMAP_GID,
578 NODEMAP_CLIENT_TO_FS, body->mbo_gid);
579 body->mbo_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
580 NODEMAP_CLIENT_TO_FS, body->mbo_fsuid);
581 body->mbo_fsgid = nodemap_map_id(nodemap, NODEMAP_GID,
582 NODEMAP_CLIENT_TO_FS, body->mbo_fsgid);
585 uc->uc_valid = UCRED_INVALID;
586 uc->uc_o_uid = uc->uc_uid = body->mbo_uid;
587 uc->uc_o_gid = uc->uc_gid = body->mbo_gid;
588 uc->uc_o_fsuid = uc->uc_fsuid = body->mbo_fsuid;
589 uc->uc_o_fsgid = uc->uc_fsgid = body->mbo_fsgid;
590 uc->uc_suppgids[0] = body->mbo_suppgid;
591 uc->uc_suppgids[1] = -1;
593 uc->uc_cap = CAP_EMPTY_SET;
594 ll_set_capability_u32(&uc->uc_cap, body->mbo_capability);
596 rc = old_init_ucred_common(info, nodemap);
597 nodemap_putref(nodemap);
/* Initialize the ucred for a non-GSS reint request, reusing the
 * fsuid/fsgid already in the ucred (mapped through the nodemap) rather
 * than values from an mdt_body, then finish via old_init_ucred_common().
 * NOTE(review): lines are missing from this extraction (the
 * IS_ERR(nodemap) check, braces, trailing RETURN). */
602 static int old_init_ucred_reint(struct mdt_thread_info *info)
604 struct lu_ucred *uc = mdt_ucred(info);
605 struct lu_nodemap *nodemap;
609 nodemap = nodemap_get_from_exp(info->mti_exp);
611 RETURN(PTR_ERR(nodemap));
615 uc->uc_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
616 NODEMAP_CLIENT_TO_FS, uc->uc_fsuid);
617 uc->uc_fsgid = nodemap_map_id(nodemap, NODEMAP_GID,
618 NODEMAP_CLIENT_TO_FS, uc->uc_fsgid);
620 uc->uc_valid = UCRED_INVALID;
/* uid/gid mirror the mapped fs ids for reint requests */
621 uc->uc_o_uid = uc->uc_o_fsuid = uc->uc_uid = uc->uc_fsuid;
622 uc->uc_o_gid = uc->uc_o_fsgid = uc->uc_gid = uc->uc_fsgid;
625 rc = old_init_ucred_common(info, nodemap);
626 nodemap_putref(nodemap);
/* Common ucred init entry: if the ucred is already valid, keep it
 * (early-return line missing from this view); otherwise tear down old
 * state and dispatch to old_init_ucred() for non-GSS requests or
 * new_init_ucred() for GSS requests carrying a user descriptor. */
631 static inline int __mdt_init_ucred(struct mdt_thread_info *info,
632 struct mdt_body *body)
634 struct ptlrpc_request *req = mdt_info_req(info);
635 struct lu_ucred *uc = mdt_ucred(info);
638 if ((uc->uc_valid == UCRED_OLD) || (uc->uc_valid == UCRED_NEW))
641 mdt_exit_ucred(info);
643 if (!req->rq_auth_gss || req->rq_auth_usr_mdt || !req->rq_user_desc)
644 return old_init_ucred(info, body);
646 return new_init_ucred(info, BODY_INIT, body);
/* Public wrapper: initialize request credentials from an mdt_body. */
649 int mdt_init_ucred(struct mdt_thread_info *info, struct mdt_body *body)
651 return __mdt_init_ucred(info, body);
/* Initialize credentials for a reint request. For a plain MDS_CLOSE
 * (no HSM-release / layout-swap intent) permission checks are skipped
 * by raising full capability sets (LU-5564). Otherwise dispatch to the
 * old (non-GSS) or new (GSS) init path.
 * NOTE(review): early-return lines are missing from this extraction. */
654 int mdt_init_ucred_reint(struct mdt_thread_info *info)
656 struct ptlrpc_request *req = mdt_info_req(info);
657 struct lu_ucred *uc = mdt_ucred(info);
658 struct md_attr *ma = &info->mti_attr;
661 if ((uc->uc_valid == UCRED_OLD) || (uc->uc_valid == UCRED_NEW))
664 /* LU-5564: for normal close request, skip permission check */
665 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE &&
666 !(ma->ma_attr_flags & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP))) {
667 cap_raise_nfsd_set(uc->uc_cap, CAP_FULL_SET);
668 cap_raise_fs_set(uc->uc_cap, CAP_FULL_SET);
671 mdt_exit_ucred(info);
673 if (!req->rq_auth_gss || req->rq_auth_usr_mdt || !req->rq_user_desc)
674 return old_init_ucred_reint(info);
676 return new_init_ucred(info, REC_INIT, NULL);
679 /* copied from lov/lov_ea.c, just for debugging, will be removed later */
/* Debug helper: dump a v1/v3 LOV EA (magic, pattern, stripe geometry
 * and per-stripe object ids) via CDEBUG. Skips object dumping for
 * directories, released files, and bogus stripe counts. */
680 void mdt_dump_lmm(int level, const struct lov_mds_md *lmm, __u64 valid)
682 const struct lov_ost_data_v1 *lod;
683 __u32 lmm_magic = le32_to_cpu(lmm->lmm_magic);
/* nothing to do unless this debug level is actually enabled */
687 if (likely(!cfs_cdebug_show(level, DEBUG_SUBSYSTEM)))
690 CDEBUG_LIMIT(level, "objid="DOSTID" magic=0x%08X pattern=%#X\n",
691 POSTID(&lmm->lmm_oi), lmm_magic,
692 le32_to_cpu(lmm->lmm_pattern));
694 /* No support for compound layouts yet */
695 if (lmm_magic != LOV_MAGIC_V1 && lmm_magic != LOV_MAGIC_V3)
698 count = le16_to_cpu(((struct lov_user_md *)lmm)->lmm_stripe_count);
699 CDEBUG_LIMIT(level, "stripe_size=0x%x, %sstripe_count=0x%x\n",
700 le32_to_cpu(lmm->lmm_stripe_size),
701 count > LOV_MAX_STRIPE_COUNT ? "bad " : "", count);
703 /* If it's a directory or a released file, then there are
704 * no actual objects to print, so bail out. */
705 if (valid & OBD_MD_FLDIREA ||
706 le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
709 if (unlikely(count > LOV_MAX_STRIPE_COUNT))
712 for (i = 0, lod = lmm->lmm_objects; i < count; i++, lod++) {
715 ostid_le_to_cpu(&lod->l_ost_oi, &oi);
716 CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n",
717 i, le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
/* Debug helper: dump an LMV EA via CDEBUG. Handles the foreign-LMV
 * case separately, then prints the v1 header and, for sane non-striped
 * magics/counts, each stripe FID. */
721 void mdt_dump_lmv(unsigned int level, const union lmv_mds_md *lmv)
723 const struct lmv_mds_md_v1 *lmm1;
724 const struct lmv_foreign_md *lfm;
727 if (likely(!cfs_cdebug_show(level, DEBUG_SUBSYSTEM)))
730 /* foreign LMV case */
731 lfm = &lmv->lmv_foreign_md;
732 if (le32_to_cpu(lfm->lfm_magic) == LMV_MAGIC_FOREIGN) {
734 "foreign magic 0x%08X, length %u, type %u, flags %u, value '%.*s'\n",
735 le32_to_cpu(lfm->lfm_magic),
736 le32_to_cpu(lfm->lfm_length),
737 le32_to_cpu(lfm->lfm_type),
738 le32_to_cpu(lfm->lfm_flags),
739 le32_to_cpu(lfm->lfm_length), lfm->lfm_value);
743 lmm1 = &lmv->lmv_md_v1;
745 "magic 0x%08X, master %#X stripe_count %d hash_type %#x\n",
746 le32_to_cpu(lmm1->lmv_magic),
747 le32_to_cpu(lmm1->lmv_master_mdt_index),
748 le32_to_cpu(lmm1->lmv_stripe_count),
749 le32_to_cpu(lmm1->lmv_hash_type));
/* stripe FIDs are only dumped for non-stripe magics and sane counts */
751 if (le32_to_cpu(lmm1->lmv_magic) == LMV_MAGIC_STRIPE)
754 if (le32_to_cpu(lmm1->lmv_stripe_count) > LMV_MAX_STRIPE_COUNT)
757 for (i = 0; i < le32_to_cpu(lmm1->lmv_stripe_count); i++) {
760 fid_le_to_cpu(&fid, &lmm1->lmv_stripe_fids[i]);
761 CDEBUG(level, "idx %u subobj "DFID"\n", i, PFID(&fid));
765 /* Shrink and/or grow reply buffers */
/* Adjust the reply capsule's variable-size buffers to the actually
 * packed sizes: shrink MDT_MD / ACL / LOGCOOKIES / SECCTX / ENCCTX /
 * DEFAULT_MDT_MD as needed, then grow MDT_MD and ACL when the "big"
 * server-side buffers were used, copying the LOV/LMV EA and ACL data
 * into the regrown reply.
 * NOTE(review): a substantial number of lines (declarations, braces,
 * error paths, early returns) are missing from this extraction; verify
 * all control flow against the full source. */
766 int mdt_fix_reply(struct mdt_thread_info *info)
768 struct req_capsule *pill = info->mti_pill;
769 struct mdt_body *body;
770 int md_size, md_packed = 0;
775 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
776 LASSERT(body != NULL);
778 if (body->mbo_valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE |
780 md_size = body->mbo_eadatasize;
784 acl_size = body->mbo_aclsize;
786 /* this replay - not send info to client */
787 if (info->mti_spec.no_create) {
792 CDEBUG(D_INFO, "Shrink to md_size = %d cookie/acl_size = %d\n",
797 &RMF_ACL, or &RMF_LOGCOOKIES
798 (optional) &RMF_CAPA1,
799 (optional) &RMF_CAPA2,
800 (optional) something else
803 /* MDT_MD buffer may be bigger than packed value, let's shrink all
804 * buffers before growing it */
805 if (info->mti_big_lmm_used) {
806 /* big_lmm buffer may be used even without packing the result
807 * into reply, just for internal server needs */
808 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
809 md_packed = req_capsule_get_size(pill, &RMF_MDT_MD,
812 /* free big lmm if md_size is not needed */
813 if (md_size == 0 || md_packed == 0) {
814 info->mti_big_lmm_used = 0;
816 /* buffer must be allocated separately */
817 LASSERT(info->mti_attr.ma_lmm !=
818 req_capsule_server_get(pill, &RMF_MDT_MD));
819 req_capsule_shrink(pill, &RMF_MDT_MD, 0, RCL_SERVER);
821 } else if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) {
822 req_capsule_shrink(pill, &RMF_MDT_MD, md_size, RCL_SERVER);
825 if (info->mti_big_acl_used) {
827 info->mti_big_acl_used = 0;
829 req_capsule_shrink(pill, &RMF_ACL, 0, RCL_SERVER);
830 } else if (req_capsule_has_field(pill, &RMF_ACL, RCL_SERVER)) {
831 req_capsule_shrink(pill, &RMF_ACL, acl_size, RCL_SERVER);
832 } else if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) {
833 req_capsule_shrink(pill, &RMF_LOGCOOKIES, acl_size, RCL_SERVER);
836 /* Shrink optional SECCTX buffer if it is not used */
837 if (req_capsule_has_field(pill, &RMF_FILE_SECCTX, RCL_SERVER) &&
838 req_capsule_get_size(pill, &RMF_FILE_SECCTX, RCL_SERVER) != 0 &&
839 !(body->mbo_valid & OBD_MD_SECCTX))
840 req_capsule_shrink(pill, &RMF_FILE_SECCTX, 0, RCL_SERVER);
842 /* Shrink optional ENCCTX buffer if it is not used */
843 if (req_capsule_has_field(pill, &RMF_FILE_ENCCTX, RCL_SERVER) &&
844 req_capsule_get_size(pill, &RMF_FILE_ENCCTX, RCL_SERVER) != 0 &&
845 !(body->mbo_valid & OBD_MD_ENCCTX))
846 req_capsule_shrink(pill, &RMF_FILE_ENCCTX, 0, RCL_SERVER);
848 /* Shrink optional default LMV buffer if it is not used */
849 if (req_capsule_has_field(pill, &RMF_DEFAULT_MDT_MD, RCL_SERVER) &&
850 req_capsule_get_size(pill, &RMF_DEFAULT_MDT_MD, RCL_SERVER) != 0 &&
851 !(body->mbo_valid & OBD_MD_DEFAULT_MEA))
852 req_capsule_shrink(pill, &RMF_DEFAULT_MDT_MD, 0, RCL_SERVER);
855 * Some more field should be shrinked if needed.
856 * This should be done by those who added fields to reply message.
859 /* Grow MD buffer if needed finally */
860 if (info->mti_big_lmm_used) {
863 LASSERT(md_size > md_packed);
864 CDEBUG(D_INFO, "Enlarge reply buffer, need extra %d bytes\n",
865 md_size - md_packed);
867 rc = req_capsule_server_grow(pill, &RMF_MDT_MD, md_size);
869 /* we can't answer with proper LOV EA, drop flags,
870 * the rc is also returned so this request is
871 * considered as failed */
872 body->mbo_valid &= ~(OBD_MD_FLDIREA | OBD_MD_FLEASIZE);
873 /* don't return transno along with error */
874 lustre_msg_set_transno(pill->rc_req->rq_repmsg, 0);
876 /* now we need to pack right LOV/LMV EA */
877 lmm = req_capsule_server_get(pill, &RMF_MDT_MD);
878 if (info->mti_attr.ma_valid & MA_LOV) {
879 LASSERT(req_capsule_get_size(pill, &RMF_MDT_MD,
881 info->mti_attr.ma_lmm_size);
882 memcpy(lmm, info->mti_attr.ma_lmm,
883 info->mti_attr.ma_lmm_size);
884 } else if (info->mti_attr.ma_valid & MA_LMV) {
885 LASSERT(req_capsule_get_size(pill, &RMF_MDT_MD,
887 info->mti_attr.ma_lmv_size);
888 memcpy(lmm, info->mti_attr.ma_lmv,
889 info->mti_attr.ma_lmv_size);
893 /* update mdt_max_mdsize so clients will be aware about that */
894 if (info->mti_mdt->mdt_max_mdsize < info->mti_attr.ma_lmm_size)
895 info->mti_mdt->mdt_max_mdsize =
896 info->mti_attr.ma_lmm_size;
897 info->mti_big_lmm_used = 0;
900 if (info->mti_big_acl_used) {
901 CDEBUG(D_INFO, "Enlarge reply ACL buffer to %d bytes\n",
904 rc = req_capsule_server_grow(pill, &RMF_ACL, acl_size);
906 body->mbo_valid &= ~OBD_MD_FLACL;
908 void *acl = req_capsule_server_get(pill, &RMF_ACL);
910 memcpy(acl, info->mti_big_acl, acl_size);
913 info->mti_big_acl_used = 0;
920 /* if object is dying, pack the lov/llog data,
921 * parameter info->mti_attr should be valid at this point!
922 * Also implements RAoLU policy */
/* On last unlink of an object: pack attributes into the reply body
 * (when running in a request context) and, if the coordinator's
 * remove-archive-on-last-unlink (RAoLU) policy applies — file unlinked,
 * archived, last open closed — queue an HSM REMOVE agent record.
 * NOTE(review): several lines (braces, returns, a "need" computation
 * branch) are missing from this extraction. */
923 int mdt_handle_last_unlink(struct mdt_thread_info *info, struct mdt_object *mo,
926 struct mdt_body *repbody = NULL;
927 const struct lu_attr *la = &ma->ma_attr;
928 struct coordinator *cdt = &info->mti_mdt->mdt_coordinator;
931 struct hsm_action_item hai = {
932 .hai_len = sizeof(hai),
933 .hai_action = HSMA_REMOVE,
934 .hai_extent.length = -1,
942 if (mdt_info_req(info) != NULL) {
943 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
944 LASSERT(repbody != NULL);
946 CDEBUG(D_INFO, "not running in a request/reply context\n");
949 if ((ma->ma_valid & MA_INODE) && repbody != NULL)
950 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(mo));
952 if (ma->ma_valid & MA_LOV) {
953 CERROR("No need in LOV EA upon unlink\n");
957 repbody->mbo_eadatasize = 0;
959 /* Only check unlinked and archived if RAoLU and upon last close */
960 if (!cdt->cdt_remove_archive_on_last_unlink ||
961 atomic_read(&mo->mot_open_count) != 0)
964 /* mdt_attr_get_complex will clear ma_valid, so check here first */
965 if ((ma->ma_valid & MA_INODE) && (ma->ma_attr.la_nlink != 0))
968 if ((ma->ma_valid & MA_HSM) && (!(ma->ma_hsm.mh_flags & HS_EXISTS)))
971 need |= (MA_INODE | MA_HSM) & ~ma->ma_valid;
973 /* ma->ma_valid is missing either MA_INODE, MA_HSM, or both,
974 * try setting them */
976 rc = mdt_attr_get_complex(info, mo, ma);
978 CERROR("%s: unable to fetch missing attributes of"
979 DFID": rc=%d\n", mdt_obd_name(info->mti_mdt),
980 PFID(mdt_object_fid(mo)), rc);
/* re-verify nlink/HS_EXISTS now that the attributes were fetched */
984 if (need & MA_INODE) {
985 if (ma->ma_valid & MA_INODE) {
986 if (ma->ma_attr.la_nlink != 0)
994 if (ma->ma_valid & MA_HSM) {
995 if (!(ma->ma_hsm.mh_flags & HS_EXISTS))
1003 /* RAoLU policy is active, last close on file has occured,
1004 * file is unlinked, file is archived, so create remove request
1006 * If CDT is not running, requests will be logged for later. */
1007 if (ma->ma_hsm.mh_arch_id != 0)
1008 archive_id = ma->ma_hsm.mh_arch_id;
1010 archive_id = cdt->cdt_default_archive_id;
1012 hai.hai_fid = *mdt_object_fid(mo);
1014 rc = mdt_agent_record_add(info->mti_env, info->mti_mdt, archive_id, 0,
1017 CERROR("%s: unable to add HSM remove request for "DFID
1018 ": rc=%d\n", mdt_obd_name(info->mti_mdt),
1019 PFID(mdt_object_fid(mo)), rc);
/* Translate wire MDS_ATTR_* validity flags into LA_* attribute-valid
 * bits, also setting side flags on @rr (open-truncate) and @ma
 * (owner-override / permission-bypass). Unknown leftover bits are
 * reported via CDEBUG.
 * NOTE(review): the "out |= LA_*" lines for most flags are missing from
 * this extraction — only the flag tests are visible. */
1024 static __u64 mdt_attr_valid_xlate(enum mds_attr_flags in,
1025 struct mdt_reint_record *rr,
1031 if (in & MDS_ATTR_MODE)
1033 if (in & MDS_ATTR_UID)
1035 if (in & MDS_ATTR_GID)
1037 if (in & MDS_ATTR_SIZE)
1039 if (in & MDS_ATTR_BLOCKS)
1041 if (in & MDS_ATTR_ATIME_SET)
1043 if (in & MDS_ATTR_CTIME_SET)
1045 if (in & MDS_ATTR_MTIME_SET)
1047 if (in & MDS_ATTR_ATTR_FLAG)
1049 if (in & MDS_ATTR_KILL_SUID)
1050 out |= LA_KILL_SUID;
1051 if (in & MDS_ATTR_KILL_SGID)
1052 out |= LA_KILL_SGID;
1053 if (in & MDS_ATTR_PROJID)
1055 if (in & MDS_ATTR_LSIZE)
1057 if (in & MDS_ATTR_LBLOCKS)
/* side-channel flags that do not map to LA_* bits */
1060 if (in & MDS_ATTR_FROM_OPEN)
1061 rr->rr_flags |= MRF_OPEN_TRUNC;
1062 if (in & MDS_ATTR_OVERRIDE)
1063 ma->ma_attr_flags |= MDS_OWNEROVERRIDE;
1064 if (in & MDS_ATTR_FORCE)
1065 ma->ma_attr_flags |= MDS_PERM_BYPASS;
/* strip everything we handled; whatever remains is unknown */
1067 in &= ~(MDS_ATTR_MODE | MDS_ATTR_UID | MDS_ATTR_GID | MDS_ATTR_PROJID |
1068 MDS_ATTR_ATIME | MDS_ATTR_MTIME | MDS_ATTR_CTIME |
1069 MDS_ATTR_ATIME_SET | MDS_ATTR_CTIME_SET | MDS_ATTR_MTIME_SET |
1070 MDS_ATTR_SIZE | MDS_ATTR_BLOCKS | MDS_ATTR_ATTR_FLAG |
1071 MDS_ATTR_FORCE | MDS_ATTR_KILL_SUID | MDS_ATTR_KILL_SGID |
1072 MDS_ATTR_FROM_OPEN | MDS_ATTR_LSIZE | MDS_ATTR_LBLOCKS |
1075 CDEBUG(D_INFO, "Unknown attr bits: %#llx\n", (u64)in);
/* Unpack a name from a request capsule field into @ln (name pointer
 * plus length excluding the NUL). Validates the name and, with
 * MNF_FIX_ANON, special-cases the "/" anonymous-dentry name produced by
 * NFS filehandle conversion.
 * NOTE(review): the error-return and anon-fixup bodies are on lines
 * missing from this extraction. */
1082 int mdt_name_unpack(struct req_capsule *pill,
1083 const struct req_msg_field *field,
1085 enum mdt_name_flags flags)
1087 ln->ln_name = req_capsule_client_get(pill, field);
1088 ln->ln_namelen = req_capsule_get_size(pill, field, RCL_CLIENT) - 1;
1090 if (!lu_name_is_valid(ln)) {
1097 if ((flags & MNF_FIX_ANON) &&
1098 ln->ln_namelen == 1 && ln->ln_name[0] == '/') {
1099 /* Newer (3.x) kernels use a name of "/" for the
1100 * "anonymous" disconnected dentries from NFS
1101 * filehandle conversion. See d_obtain_alias(). */
/* Extract the optional file security-context xattr name and value from
 * the request capsule. Outputs are NULLed first; the function returns
 * (on lines missing here) without output when the fields are absent.
 * The name length is bounded by XATTR_NAME_MAX and must be exactly
 * NUL-terminated. */
1109 static int mdt_file_secctx_unpack(struct req_capsule *pill,
1110 const char **secctx_name,
1111 void **secctx, size_t *secctx_size)
1116 *secctx_name = NULL;
1120 if (!req_capsule_has_field(pill, &RMF_FILE_SECCTX_NAME, RCL_CLIENT) ||
1121 !req_capsule_field_present(pill, &RMF_FILE_SECCTX_NAME, RCL_CLIENT))
1124 name_size = req_capsule_get_size(pill, &RMF_FILE_SECCTX_NAME,
/* reject oversized or improperly terminated xattr names */
1129 if (name_size > XATTR_NAME_MAX + 1)
1132 name = req_capsule_client_get(pill, &RMF_FILE_SECCTX_NAME);
1133 if (strnlen(name, name_size) != name_size - 1)
1136 if (!req_capsule_has_field(pill, &RMF_FILE_SECCTX, RCL_CLIENT) ||
1137 !req_capsule_field_present(pill, &RMF_FILE_SECCTX, RCL_CLIENT))
1140 *secctx_name = name;
1141 *secctx = req_capsule_client_get(pill, &RMF_FILE_SECCTX);
1142 *secctx_size = req_capsule_get_size(pill, &RMF_FILE_SECCTX, RCL_CLIENT);
/* Extract the optional file encryption context from the request
 * capsule; only when the export negotiated encryption support and the
 * field is present and non-empty.
 * NOTE(review): return statements are on lines missing from this
 * extraction. */
1147 static int mdt_file_encctx_unpack(struct req_capsule *pill,
1148 void **encctx, size_t *encctx_size)
1153 if (!exp_connect_encrypt(pill->rc_req->rq_export))
1156 if (!req_capsule_has_field(pill, &RMF_FILE_ENCCTX, RCL_CLIENT) ||
1157 !req_capsule_field_present(pill, &RMF_FILE_ENCCTX, RCL_CLIENT))
1160 *encctx_size = req_capsule_get_size(pill, &RMF_FILE_ENCCTX, RCL_CLIENT);
1161 if (*encctx_size == 0)
1164 *encctx = req_capsule_client_get(pill, &RMF_FILE_ENCCTX);
/* Unpack an mdt_rec_setattr reint record: seed the ucred with the
 * record's fs ids/caps/suppgid (needed before old_init_ucred_reint()),
 * translate validity flags, nodemap-map uid/gid/projid, and copy the
 * size/time attributes and close-intent bias flags into mti_attr. */
1169 static int mdt_setattr_unpack_rec(struct mdt_thread_info *info)
1171 struct lu_ucred *uc = mdt_ucred(info);
1172 struct md_attr *ma = &info->mti_attr;
1173 struct lu_attr *la = &ma->ma_attr;
1174 struct req_capsule *pill = info->mti_pill;
1175 struct mdt_reint_record *rr = &info->mti_rr;
1176 struct mdt_rec_setattr *rec;
1177 struct lu_nodemap *nodemap;
/* all reint record variants must share one wire size */
1181 BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
1182 rec = req_capsule_client_get(pill, &RMF_REC_REINT);
1186 /* This prior initialization is needed for old_init_ucred_reint() */
1187 uc->uc_fsuid = rec->sa_fsuid;
1188 uc->uc_fsgid = rec->sa_fsgid;
1189 uc->uc_cap = CAP_EMPTY_SET;
1190 ll_set_capability_u32(&uc->uc_cap, rec->sa_cap);
1191 uc->uc_suppgids[0] = rec->sa_suppgid;
1192 uc->uc_suppgids[1] = -1;
1194 rr->rr_fid1 = &rec->sa_fid;
1195 la->la_valid = mdt_attr_valid_xlate(rec->sa_valid, rr, ma);
1196 la->la_mode = rec->sa_mode;
1197 la->la_flags = rec->sa_attr_flags;
1199 nodemap = nodemap_get_from_exp(info->mti_exp);
1200 if (IS_ERR(nodemap))
1201 RETURN(PTR_ERR(nodemap));
/* map the requested ownership through the nodemap */
1203 la->la_uid = nodemap_map_id(nodemap, NODEMAP_UID,
1204 NODEMAP_CLIENT_TO_FS, rec->sa_uid);
1205 la->la_gid = nodemap_map_id(nodemap, NODEMAP_GID,
1206 NODEMAP_CLIENT_TO_FS, rec->sa_gid);
1207 la->la_projid = nodemap_map_id(nodemap, NODEMAP_PROJID,
1208 NODEMAP_CLIENT_TO_FS, rec->sa_projid);
1209 nodemap_putref(nodemap);
1211 la->la_size = rec->sa_size;
1212 la->la_blocks = rec->sa_blocks;
1213 la->la_ctime = rec->sa_ctime;
1214 la->la_atime = rec->sa_atime;
1215 la->la_mtime = rec->sa_mtime;
1216 ma->ma_valid = MA_INODE;
/* keep only the bias bits the setattr path cares about */
1218 ma->ma_attr_flags |= rec->sa_bias & (MDS_CLOSE_INTENT |
1219 MDS_DATA_MODIFIED | MDS_TRUNC_KEEP_LEASE |
/* Pull the open handle out of the request's MDT_EPOCH buffer (when
 * non-empty) and stash it in mti_open_handle.
 * NOTE(review): else/return lines are missing from this extraction. */
1224 static int mdt_close_handle_unpack(struct mdt_thread_info *info)
1226 struct req_capsule *pill = info->mti_pill;
1227 struct mdt_ioepoch *ioepoch;
1230 if (req_capsule_get_size(pill, &RMF_MDT_EPOCH, RCL_CLIENT))
1231 ioepoch = req_capsule_client_get(pill, &RMF_MDT_EPOCH);
1235 if (ioepoch == NULL)
1238 info->mti_open_handle = ioepoch->mio_open_handle;
/* Unpack the optional DLM request buffer into mti_dlm_req; a NULL
 * result after a non-empty size is an error (the error-return line is
 * missing from this extraction). */
1243 static inline int mdt_dlmreq_unpack(struct mdt_thread_info *info) {
1244 struct req_capsule *pill = info->mti_pill;
1246 if (req_capsule_get_size(pill, &RMF_DLM_REQ, RCL_CLIENT)) {
1247 info->mti_dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
1248 if (info->mti_dlm_req == NULL)
/* Unpack a REINT_SETATTR request: the fixed setattr record first, then an
 * optional EA payload (LOV or LMV layout), then the trailing DLM request. */
static int mdt_setattr_unpack(struct mdt_thread_info *info)
	struct mdt_reint_record *rr = &info->mti_rr;
	struct md_attr *ma = &info->mti_attr;
	struct req_capsule *pill = info->mti_pill;

	/* fixed wire record: uid/gid/times/size/flags into ma/rr */
	rc = mdt_setattr_unpack_rec(info);

	/* optional EA: may carry either a LOV or an LMV layout blob */
	if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
		rr->rr_eadata = req_capsule_client_get(pill, &RMF_EADATA);
		rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA,

		if (rr->rr_eadatalen > 0) {
			const struct lmv_user_md *lum;

			lum = rr->rr_eadata;
			/* ma_valid (from the request) does not say whether the
			 * EA is a LOV or LMV layout, so dispatch on the magic
			 * in the blob itself */
			if (le32_to_cpu(lum->lum_magic) == LMV_USER_MAGIC) {
				ma->ma_valid |= MA_LMV;
				ma->ma_lmv = (void *)rr->rr_eadata;
				ma->ma_lmv_size = rr->rr_eadatalen;
			/* non-LMV magic: presumably the else branch (truncated
			 * here) — treated as a LOV layout */
				ma->ma_valid |= MA_LOV;
				ma->ma_lmm = (void *)rr->rr_eadata;
				ma->ma_lmm_size = rr->rr_eadatalen;

	rc = mdt_dlmreq_unpack(info);
/* Unpack the optional close-intent data (lease release, resync, layout
 * merge/swap, ...) when the close carries MDS_CLOSE_INTENT in ma_attr_flags.
 * Requires mdt_setattr_unpack_rec() to have filled ma_attr_flags first. */
static int mdt_close_intent_unpack(struct mdt_thread_info *info)
	struct md_attr *ma = &info->mti_attr;
	struct req_capsule *pill = info->mti_pill;

	/* a plain close has no intent buffer — nothing to do */
	if (!(ma->ma_attr_flags & MDS_CLOSE_INTENT))

	/* grow the capsule layout so RMF_CLOSE_DATA becomes addressable */
	req_capsule_extend(pill, &RQF_MDS_CLOSE_INTENT);

	/* the intent flag promises close data; its absence is a protocol
	 * error (error return truncated in this extract) */
	if (!(req_capsule_has_field(pill, &RMF_CLOSE_DATA, RCL_CLIENT) &&
	      req_capsule_field_present(pill, &RMF_CLOSE_DATA, RCL_CLIENT)))
/* Unpack an MDS_CLOSE request: open handle, setattr record (for size/time
 * updates at close), optional close intent, then set up reint credentials.
 * Error checks between the steps are not visible in this extract. */
int mdt_close_unpack(struct mdt_thread_info *info)
	rc = mdt_close_handle_unpack(info);

	rc = mdt_setattr_unpack_rec(info);

	rc = mdt_close_intent_unpack(info);

	/* credentials come from the reint record unpacked above */
	RETURN(mdt_init_ucred_reint(info));
/* Unpack a REINT_CREATE request (mkdir/mknod/symlink) into mti_rr, the
 * create attributes, and the md_op_spec.  Also primes the ucred fields so
 * old_init_ucred_reint() can consume them.  Several conditions in this
 * extract are truncated mid-line. */
static int mdt_create_unpack(struct mdt_thread_info *info)
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_create *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	struct md_op_spec *sp = &info->mti_spec;

	/* all reint records share one wire size; create must match */
	BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->cr_fsuid;
	uc->uc_fsgid = rec->cr_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->cr_cap);
	uc->uc_suppgids[0] = rec->cr_suppgid1;
	uc->uc_suppgids[1] = -1;
	uc->uc_umask = rec->cr_umask;

	/* fid1 = parent dir, fid2 = fid of the object to create */
	rr->rr_fid1 = &rec->cr_fid1;
	rr->rr_fid2 = &rec->cr_fid2;
	attr->la_mode = rec->cr_mode;
	attr->la_rdev = rec->cr_rdev;
	attr->la_uid = rec->cr_fsuid;
	attr->la_gid = rec->cr_fsgid;
	/* client sends one creation time used for ctime/mtime/atime */
	attr->la_ctime = rec->cr_time;
	attr->la_mtime = rec->cr_time;
	attr->la_atime = rec->cr_time;
	attr->la_valid = LA_MODE | LA_RDEV | LA_UID | LA_GID | LA_TYPE |
	LA_CTIME | LA_MTIME | LA_ATIME;
	memset(&sp->u, 0, sizeof(sp->u));
	/* cr_flags are split across two 32-bit wire fields */
	sp->sp_cr_flags = get_mrc_cr_flags(rec);

	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);

	if (S_ISLNK(attr->la_mode)) {
		const char *tgt = NULL;

		/* symlinks carry the target string in an extra buffer */
		req_capsule_extend(pill, &RQF_MDS_REINT_CREATE_SYM);
		sz = req_capsule_get_size(pill, &RMF_SYMTGT, RCL_CLIENT);
		tgt = req_capsule_client_get(pill, &RMF_SYMTGT);
		sp->u.sp_symname.ln_name = tgt;
		sp->u.sp_symname.ln_namelen = sz - 1; /* skip NUL */

	/* non-symlink path: layout with optional ACL/EA buffers */
	req_capsule_extend(pill, &RQF_MDS_REINT_CREATE_ACL);
	if (S_ISDIR(attr->la_mode)) {
		struct obd_export *exp = mdt_info_req(info)->rq_export;

		/* whether default LMV may be inherited implicitly is a
		 * per-MDT tunable */
		sp->sp_dmv_imp_inherit =
		info->mti_mdt->mdt_enable_dmv_implicit_inherit;
		if (req_capsule_get_size(pill, &RMF_EADATA, RCL_CLIENT)
			sp->u.sp_ea.eadata =
			req_capsule_client_get(pill,
			sp->u.sp_ea.eadatalen =
			req_capsule_get_size(pill, &RMF_EADATA,
			sp->sp_cr_flags |= MDS_OPEN_HAS_EA;

		/* compatibility checks against the client's connect flags
		 * for default-LMV handling (conditions truncated here) */
		if (OCD_HAS_FLAG2(&exp->exp_connect_data,
			if ((sp->sp_cr_flags & MDS_OPEN_DEFAULT_LMV) &&
			    !(sp->sp_cr_flags & MDS_OPEN_HAS_EA))
		} else if (sp->sp_cr_flags & MDS_OPEN_DEFAULT_LMV) {

	/* optional security context (e.g. SELinux label) for the new file */
	rc = mdt_file_secctx_unpack(pill, &sp->sp_cr_file_secctx_name,
	&sp->sp_cr_file_secctx,
	&sp->sp_cr_file_secctx_size);

	/* optional encryption context for the new file */
	rc = mdt_file_encctx_unpack(pill, &sp->sp_cr_file_encctx,
	&sp->sp_cr_file_encctx_size);

	rc = req_check_sepol(pill);

	rc = mdt_dlmreq_unpack(info);
/* Unpack a REINT_LINK request: credentials, source/target fids, link time
 * and the new name, followed by SELinux policy check and DLM request. */
static int mdt_link_unpack(struct mdt_thread_info *info)
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_link *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;

	BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->lk_fsuid;
	uc->uc_fsgid = rec->lk_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->lk_cap);
	/* link involves two objects, so two supplementary groups */
	uc->uc_suppgids[0] = rec->lk_suppgid1;
	uc->uc_suppgids[1] = rec->lk_suppgid2;

	attr->la_uid = rec->lk_fsuid;
	attr->la_gid = rec->lk_fsgid;
	/* fid1 = existing object, fid2 = target parent directory */
	rr->rr_fid1 = &rec->lk_fid1;
	rr->rr_fid2 = &rec->lk_fid2;
	attr->la_ctime = rec->lk_time;
	attr->la_mtime = rec->lk_time;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME;

	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);

	rc = req_check_sepol(pill);

	rc = mdt_dlmreq_unpack(info);
/* Unpack a REINT_UNLINK request: credentials, parent/child fids, the name
 * to remove, and the fid-op / replay bookkeeping flags. */
static int mdt_unlink_unpack(struct mdt_thread_info *info)
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_unlink *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;

	BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->ul_fsuid;
	uc->uc_fsgid = rec->ul_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->ul_cap);
	uc->uc_suppgids[0] = rec->ul_suppgid1;
	uc->uc_suppgids[1] = -1;

	attr->la_uid = rec->ul_fsuid;
	attr->la_gid = rec->ul_fsgid;
	/* fid1 = parent directory, fid2 = object being unlinked */
	rr->rr_fid1 = &rec->ul_fid1;
	rr->rr_fid2 = &rec->ul_fid2;
	attr->la_ctime = rec->ul_time;
	attr->la_mtime = rec->ul_time;
	attr->la_mode = rec->ul_mode;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;
	/* client may ask to unlink by fid rather than by name */
	if (rec->ul_bias & MDS_FID_OP)
		info->mti_spec.sp_cr_flags |= MDS_OP_WITH_FID;
		info->mti_spec.sp_cr_flags &= ~MDS_OP_WITH_FID;

	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);

	/* on replay the object may already be gone — don't recreate state */
	info->mti_spec.no_create = !!req_is_replay(mdt_info_req(info));

	rc = req_check_sepol(pill);

	rc = mdt_dlmreq_unpack(info);
1532 static int mdt_rmentry_unpack(struct mdt_thread_info *info)
1534 info->mti_spec.sp_rm_entry = 1;
1535 return mdt_unlink_unpack(info);
/* Unpack a REINT_RENAME request: credentials, source/target parent fids,
 * both names, and replay bookkeeping. */
static int mdt_rename_unpack(struct mdt_thread_info *info)
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_rename *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	struct md_op_spec *spec = &info->mti_spec;

	BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->rn_fsuid;
	uc->uc_fsgid = rec->rn_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->rn_cap);
	/* rename touches two directories, so two supplementary groups */
	uc->uc_suppgids[0] = rec->rn_suppgid1;
	uc->uc_suppgids[1] = rec->rn_suppgid2;

	attr->la_uid = rec->rn_fsuid;
	attr->la_gid = rec->rn_fsgid;
	/* fid1 = source parent, fid2 = target parent */
	rr->rr_fid1 = &rec->rn_fid1;
	rr->rr_fid2 = &rec->rn_fid2;
	attr->la_ctime = rec->rn_time;
	attr->la_mtime = rec->rn_time;
	/* rename_tgt contains the mode already */
	attr->la_mode = rec->rn_mode;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;

	/* source name ... */
	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);

	/* ... and target name travel in the SYMTGT slot for rename */
	rc = mdt_name_unpack(pill, &RMF_SYMTGT, &rr->rr_tgt_name, 0);

	spec->no_create = !!req_is_replay(mdt_info_req(info));

	rc = req_check_sepol(pill);

	rc = mdt_dlmreq_unpack(info);
/* Unpack a REINT_MIGRATE request (cross-MDT move).  Shares the rename wire
 * record, plus an LMV user md describing the target layout; clients older
 * than 2.11 that send no lum are rejected with -EOPNOTSUPP. */
static int mdt_migrate_unpack(struct mdt_thread_info *info)
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_rename *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	struct md_op_spec *spec = &info->mti_spec;

	BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->rn_fsuid;
	uc->uc_fsgid = rec->rn_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->rn_cap);
	uc->uc_suppgids[0] = rec->rn_suppgid1;
	uc->uc_suppgids[1] = rec->rn_suppgid2;

	attr->la_uid = rec->rn_fsuid;
	attr->la_gid = rec->rn_fsgid;
	/* fid1 = parent, fid2 = object being migrated */
	rr->rr_fid1 = &rec->rn_fid1;
	rr->rr_fid2 = &rec->rn_fid2;
	attr->la_ctime = rec->rn_time;
	attr->la_mtime = rec->rn_time;
	/* rename_tgt contains the mode already */
	attr->la_mode = rec->rn_mode;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;
	spec->sp_cr_flags = 0;

	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);

	/* migrating an open file piggybacks a close of the client's lease */
	if (rec->rn_bias & MDS_CLOSE_MIGRATE) {
		rc = mdt_close_handle_unpack(info);

		spec->sp_migrate_close = 1;
		spec->sp_migrate_close = 0;

	/* namespace-only migration: move the dentry, not the data */
	spec->sp_migrate_nsonly = !!(rec->rn_bias & MDS_MIGRATE_NSONLY);

	/* lustre version > 2.11 migration packs lum */
	if (req_capsule_has_field(pill, &RMF_EADATA, RCL_CLIENT)) {
		if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
			rr->rr_eadatalen = req_capsule_get_size(pill,

			if (rr->rr_eadatalen > 0) {
				struct lmv_user_md_v1 *lmu;

				lmu = req_capsule_client_get(pill, &RMF_EADATA);
				/* pin the hash type so the layout can't be
				 * re-balanced mid-migration */
				lmu->lum_hash_type |=
				cpu_to_le32(LMV_HASH_FLAG_FIXED);
				rr->rr_eadata = lmu;
				spec->u.sp_ea.eadatalen = rr->rr_eadatalen;
				spec->u.sp_ea.eadata = rr->rr_eadata;
				spec->sp_cr_flags |= MDS_OPEN_HAS_EA;

		/* old client doesn't provide lum. */
		RETURN(-EOPNOTSUPP);

	spec->no_create = !!req_is_replay(mdt_info_req(info));

	rc = mdt_dlmreq_unpack(info);
/* Re-mark a replayed LOV EA's magic as "defined".
 * please see comment above LOV_MAGIC_V1_DEFINED
 */
void mdt_fix_lov_magic(struct mdt_thread_info *info, void *eadata)
	struct lov_user_md_v1 *v1 = eadata;

	/* only replayed requests need the fixup */
	if (unlikely(req_is_replay(mdt_info_req(info)))) {
		/* handle both native and byte-swapped (cross-endian) magics */
		if ((v1->lmm_magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
			v1->lmm_magic |= LOV_MAGIC_DEFINED;
		else if ((v1->lmm_magic & __swab32(LOV_MAGIC_MAGIC)) ==
			 __swab32(LOV_MAGIC_MAGIC))
			v1->lmm_magic |= __swab32(LOV_MAGIC_DEFINED);
/* Unpack a REINT_OPEN (possibly open-create) request: credentials, fids,
 * open handle, create attributes, optional layout EA, and security /
 * encryption contexts.  No DLM request here — open uses intent locking. */
static int mdt_open_unpack(struct mdt_thread_info *info)
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_create *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct req_capsule *pill = info->mti_pill;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct ptlrpc_request *req = mdt_info_req(info);
	struct md_op_spec *sp = &info->mti_spec;

	BUILD_BUG_ON(sizeof(struct mdt_rec_create) !=
	sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->cr_fsuid;
	uc->uc_fsgid = rec->cr_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->cr_cap);
	uc->uc_suppgids[0] = rec->cr_suppgid1;
	uc->uc_suppgids[1] = rec->cr_suppgid2;
	uc->uc_umask = rec->cr_umask;

	/* fid1 = parent, fid2 = child being opened/created */
	rr->rr_fid1 = &rec->cr_fid1;
	rr->rr_fid2 = &rec->cr_fid2;
	/* client-remembered handle, used for open replay */
	rr->rr_open_handle = &rec->cr_open_handle_old;
	attr->la_mode = rec->cr_mode;
	attr->la_rdev = rec->cr_rdev;
	attr->la_uid = rec->cr_fsuid;
	attr->la_gid = rec->cr_fsgid;
	/* one client timestamp seeds ctime/mtime/atime */
	attr->la_ctime = rec->cr_time;
	attr->la_mtime = rec->cr_time;
	attr->la_atime = rec->cr_time;
	attr->la_valid = LA_MODE | LA_RDEV | LA_UID | LA_GID |
	LA_CTIME | LA_MTIME | LA_ATIME;
	memset(&info->mti_spec.u, 0, sizeof(info->mti_spec.u));
	info->mti_spec.sp_cr_flags = get_mrc_cr_flags(rec);
	/* Do not trigger ASSERTION if client miss to set such flags. */
	if (unlikely(info->mti_spec.sp_cr_flags == 0))

	info->mti_cross_ref = !!(rec->cr_bias & MDS_CROSS_REF);

	/* open-by-fid may legitimately have no name */
	mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, MNF_FIX_ANON);

	if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
		rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA,

		if (rr->rr_eadatalen > 0) {
			rr->rr_eadata = req_capsule_client_get(pill,
			sp->u.sp_ea.eadatalen = rr->rr_eadatalen;
			sp->u.sp_ea.eadata = rr->rr_eadata;
			sp->sp_archive_id = rec->cr_archive_id;
			sp->no_create = !!req_is_replay(req);
			/* replayed EAs need their magic re-marked as defined */
			mdt_fix_lov_magic(info, rr->rr_eadata);

		/*
		 * Client default md_size may be 0 right after client start,
		 * until all osc are connected, set here just some reasonable
		 * value to prevent misbehavior.
		 */
		if (rr->rr_eadatalen == 0 &&
		    !(info->mti_spec.sp_cr_flags & MDS_OPEN_DELAY_CREATE))
			rr->rr_eadatalen = MIN_MD_SIZE;

	rc = mdt_file_secctx_unpack(pill, &sp->sp_cr_file_secctx_name,
	&sp->sp_cr_file_secctx,
	&sp->sp_cr_file_secctx_size);

	rc = mdt_file_encctx_unpack(pill, &sp->sp_cr_file_encctx,
	&sp->sp_cr_file_encctx_size);

	rc = req_check_sepol(pill);
/* Unpack a REINT_SETXATTR request: credentials, target fid, xattr name and
 * optional value, with a size sanity check against mdt_max_ea_size. */
static int mdt_setxattr_unpack(struct mdt_thread_info *info)
	struct mdt_reint_record *rr = &info->mti_rr;
	struct lu_ucred *uc = mdt_ucred(info);
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct req_capsule *pill = info->mti_pill;
	struct mdt_rec_setxattr *rec;

	BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) !=
	sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->sx_fsuid;
	uc->uc_fsgid = rec->sx_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->sx_cap);
	uc->uc_suppgids[0] = rec->sx_suppgid1;
	uc->uc_suppgids[1] = -1;

	rr->rr_opcode = rec->sx_opcode;
	rr->rr_fid1 = &rec->sx_fid;
	attr->la_valid = rec->sx_valid;
	attr->la_ctime = rec->sx_time;
	attr->la_size = rec->sx_size;
	attr->la_flags = rec->sx_flags;

	/* xattr name */
	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);

	/* optional xattr value; a removal (OBD_MD_FLXATTRRM) carries none */
	if (req_capsule_field_present(pill, &RMF_EADATA, RCL_CLIENT)) {
		rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA,

		/* refuse values larger than the server-side EA limit */
		if (rr->rr_eadatalen > info->mti_mdt->mdt_max_ea_size)

		if (rr->rr_eadatalen > 0) {
			rr->rr_eadata = req_capsule_client_get(pill,
			if (rr->rr_eadata == NULL)

			rr->rr_eadata = NULL;

	} else if (!(attr->la_valid & OBD_MD_FLXATTRRM)) {
		/* a set (not remove) without a value is a protocol error */
		CDEBUG(D_INFO, "no xattr data supplied\n");

	rc = req_check_sepol(pill);

	if (mdt_dlmreq_unpack(info) < 0)
/* Unpack a REINT_RESYNC request (mirror resync): credentials, target fid,
 * mirror id and lease handle, then the trailing DLM request. */
static int mdt_resync_unpack(struct mdt_thread_info *info)
	struct req_capsule *pill = info->mti_pill;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_resync *rec;

	BUILD_BUG_ON(sizeof(*rec) != sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->rs_fsuid;
	uc->uc_fsgid = rec->rs_fsgid;
	uc->uc_cap = CAP_EMPTY_SET;
	ll_set_capability_u32(&uc->uc_cap, rec->rs_cap);

	rr->rr_fid1 = &rec->rs_fid;
	rr->rr_mirror_id = rec->rs_mirror_id;

	/* cookie doesn't need to be swapped but it has been swapped
	 * in lustre_swab_mdt_rec_reint() as rr_mtime, so here it needs
	 * to be swapped back */
	if (req_capsule_req_need_swab(pill))
		__swab64s(&rec->rs_lease_handle.cookie);
	rr->rr_lease_handle = &rec->rs_lease_handle;

	RETURN(mdt_dlmreq_unpack(info));
/* Per-opcode REINT unpack handler signature. */
typedef int (*reint_unpacker)(struct mdt_thread_info *info);

/* Dispatch table indexed by REINT opcode; entries left NULL are rejected
 * by mdt_reint_unpack(). */
static reint_unpacker mdt_reint_unpackers[REINT_MAX] = {
	[REINT_SETATTR] = mdt_setattr_unpack,
	[REINT_CREATE] = mdt_create_unpack,
	[REINT_LINK] = mdt_link_unpack,
	[REINT_UNLINK] = mdt_unlink_unpack,
	[REINT_RENAME] = mdt_rename_unpack,
	[REINT_OPEN] = mdt_open_unpack,
	[REINT_SETXATTR] = mdt_setxattr_unpack,
	[REINT_RMENTRY] = mdt_rmentry_unpack,
	[REINT_MIGRATE] = mdt_migrate_unpack,
	[REINT_RESYNC] = mdt_resync_unpack,
/* Entry point for REINT request unpacking: validate the opcode and
 * dispatch to the matching handler in mdt_reint_unpackers[]. */
int mdt_reint_unpack(struct mdt_thread_info *info, __u32 op)
	/* start every request from a clean reint record */
	memset(&info->mti_rr, 0, sizeof(info->mti_rr));
	if (op < REINT_MAX && mdt_reint_unpackers[op] != NULL) {
		info->mti_rr.rr_opcode = op;
		rc = mdt_reint_unpackers[op](info);
		/* unknown or unhandled opcode */
		CERROR("Unexpected opcode %d\n", op);
/* Fill the reply's RMF_FILE_SECCTX buffer with @child's security context
 * xattr (name supplied by the client), shrinking the buffer to the actual
 * size; on lookup failure the buffer is shrunk to zero and -ENOENT is
 * propagated to the caller (see LU-13115). */
int mdt_pack_secctx_in_reply(struct mdt_thread_info *info,
			     struct mdt_object *child)
	struct lu_buf *buffer;
	struct mdt_body *repbody;
	struct req_capsule *pill = info->mti_pill;

	/* only act when the reply actually reserved a secctx buffer */
	if (req_capsule_has_field(pill, &RMF_FILE_SECCTX, RCL_SERVER) &&
	    req_capsule_get_size(pill, &RMF_FILE_SECCTX, RCL_SERVER) != 0) {
		/* xattr name requested by the client (assignment target
		 * truncated in this extract) */
		req_capsule_client_get(pill, &RMF_FILE_SECCTX_NAME);
		buffer = &info->mti_buf;

		/* fill reply buffer with security context now */
		buffer->lb_len = req_capsule_get_size(pill, &RMF_FILE_SECCTX,
		buffer->lb_buf = req_capsule_server_get(info->mti_pill,
		rc = mo_xattr_get(info->mti_env, mdt_object_child(child),
		buffer, secctx_name);
		/* success: rc is the context size read into the buffer */
		"found security context of size %d for "DFID"\n",
		rc, PFID(mdt_object_fid(child)));

		repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
		repbody->mbo_valid |= OBD_MD_SECCTX;
		/* trim reply to the real size to save wire bytes */
		if (rc < buffer->lb_len)
			req_capsule_shrink(pill, &RMF_FILE_SECCTX, rc,
		/* failure branch: no context for this object */
		"security context not found for "DFID": rc = %d\n",
		PFID(mdt_object_fid(child)), rc);
		req_capsule_shrink(pill, &RMF_FILE_SECCTX, 0,
		/* handling -ENOENT is important because it may change
		 * object state in DNE env dropping LOHA_EXISTS flag,
		 * it is important to return that to the caller.
		 * Check LU-13115 for details.
		 */
/* check whether two FIDs belong to different MDTs.
 * \retval 1 on different MDTs.
 *	   0 on the same MDT.
 *	   negative errno on FLD lookup failure.
 */
int mdt_fids_different_target(struct mdt_thread_info *info,
			      const struct lu_fid *fid1,
			      const struct lu_fid *fid2)
	const struct lu_env *env = info->mti_env;
	struct mdt_device *mdt = info->mti_mdt;
	struct lu_seq_range *range = &info->mti_range;
	struct seq_server_site *ss;
	__u32 index1, index2;

	/* same sequence implies same MDT: no FLD lookup needed */
	if (fid_seq(fid1) == fid_seq(fid2))

	ss = mdt->mdt_lu_dev.ld_site->ld_seq_site;

	/* resolve each sequence to its MDT index via the FLD server */
	range->lsr_flags = LU_SEQ_RANGE_MDT;
	rc = fld_server_lookup(env, ss->ss_server_fld, fid1->f_seq, range);

	index1 = range->lsr_index;

	rc = fld_server_lookup(env, ss->ss_server_fld, fid2->f_seq, range);

	index2 = range->lsr_index;

	return index1 != index2;
/*
 * Check whether \a child is remote object on \a parent.
 *
 * \param[in] info	thread environment
 * \param[in] parent	parent object, it's the same as child object in
 *			the getattr_by_fid case (client < 2.13.52)
 * \param[in] child	child object
 *
 * \retval 1	is remote object.
 * \retval 0	isn't remote object.
 * \retval < 0	error code
 */
int mdt_is_remote_object(struct mdt_thread_info *info,
			 struct mdt_object *parent,
			 struct mdt_object *child)
	struct lu_buf *buf = &info->mti_big_buf;
	struct linkea_data ldata = { NULL };
	struct link_ea_header *leh;
	struct link_ea_entry *lee;
	struct lu_name name;

	/* the filesystem root is never remote */
	if (fid_is_root(mdt_object_fid(child)))

	if (likely(parent != child)) {
		/* exactly one of the two remote -> child is remote here */
		if (mdt_object_remote(parent) ^ mdt_object_remote(child))

		/* both local: compare their MDT indices via FLD */
		if (!mdt_object_remote(parent) && !mdt_object_remote(child))

		rc = mdt_fids_different_target(info, mdt_object_fid(parent),
		mdt_object_fid(child));

	/* client < 2.13.52 getattr_by_fid parent and child are the same */
	buf = lu_buf_check_and_alloc(buf, PATH_MAX);

	/* parents must be recovered from the child's link xattr */
	rc = mdt_links_read(info, child, &ldata);
	/* can't read linkea, just assume it's remote object */
	if (rc == -ENOENT || rc == -ENODATA)

	/* walk every recorded parent; remote if any differs in target */
	lee = (struct link_ea_entry *)(leh + 1);
	for (i = 0; i < leh->leh_reccount; i++) {
		linkea_entry_unpack(lee, &reclen, &name, &pfid);
		lee = (struct link_ea_entry *) ((char *)lee + reclen);
		rc = mdt_fids_different_target(info, &pfid,
		mdt_object_fid(child));
2071 int mdt_pack_encctx_in_reply(struct mdt_thread_info *info,
2072 struct mdt_object *child)
2074 struct lu_buf *buffer;
2075 struct mdt_body *repbody;
2076 struct req_capsule *pill = info->mti_pill;
2077 struct obd_export *exp = mdt_info_req(info)->rq_export;
2080 if (!exp_connect_encrypt(exp))
2083 if (req_capsule_has_field(pill, &RMF_FILE_ENCCTX, RCL_SERVER) &&
2084 req_capsule_get_size(pill, &RMF_FILE_ENCCTX, RCL_SERVER) != 0) {
2085 struct lu_attr la = { 0 };
2086 struct dt_object *dt = mdt_obj2dt(child);
2088 if (dt && dt->do_ops && dt->do_ops->do_attr_get)
2089 dt_attr_get(info->mti_env, mdt_obj2dt(child), &la);
2091 if (la.la_valid & LA_FLAGS && la.la_flags & LUSTRE_ENCRYPT_FL) {
2092 buffer = &info->mti_buf;
2094 /* fill reply buffer with encryption context now */
2096 req_capsule_get_size(pill, &RMF_FILE_ENCCTX,
2099 req_capsule_server_get(pill, &RMF_FILE_ENCCTX);
2100 rc = mo_xattr_get(info->mti_env,
2101 mdt_object_child(child),
2103 LL_XATTR_NAME_ENCRYPTION_CONTEXT);
2104 if (unlikely(rc == -ENODATA))
2105 /* For compatibility with 2.14 */
2106 rc = mo_xattr_get(info->mti_env,
2107 mdt_object_child(child),
2109 LL_XATTR_NAME_ENCRYPTION_CONTEXT_OLD);
2112 "found encryption ctx of size %d for "DFID"\n",
2113 rc, PFID(mdt_object_fid(child)));
2115 repbody = req_capsule_server_get(pill,
2117 repbody->mbo_valid |= OBD_MD_ENCCTX;
2118 if (rc < buffer->lb_len)
2119 req_capsule_shrink(pill,
2120 &RMF_FILE_ENCCTX, rc,
2125 "encryption ctx not found for "DFID": rc = %d\n",
2126 PFID(mdt_object_fid(child)), rc);
2127 req_capsule_shrink(pill, &RMF_FILE_ENCCTX, 0,
2129 /* handling -ENOENT is important because it may
2130 * change object state in DNE env dropping
2131 * LOHA_EXISTS flag, it is important to return
2132 * that to the caller.
2133 * Check LU-13115 for details.
2139 req_capsule_shrink(pill, &RMF_FILE_ENCCTX, 0,