 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/mdt/mdt_handler.c
 *
 * Lustre Metadata Target (mdt) request handler
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Mike Shaver <shaver@clusterfs.com>
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Huang Hua <huanghua@clusterfs.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_MDS

#include <linux/module.h>
/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>
/* struct ptlrpc_request */
#include <lustre_net.h>
/* struct obd_export */
#include <lustre_export.h>
/* struct obd_device */
#include <obd.h>
#include <dt_object.h>
#include <lustre_mds.h>
#include <lustre_mdt.h>
#include <lustre_log.h>
#include "mdt_internal.h"
#include <lustre_acl.h>
#include <lustre_param.h>
#include <lustre_quota.h>
#include <lustre_linkea.h>
#include <lustre_lfsck.h>

mdl_mode_t mdt_mdl_lock_modes[] = {
        [LCK_MINMODE] = MDL_MINMODE,
        [LCK_EX]      = MDL_EX,
        [LCK_PW]      = MDL_PW,
        [LCK_PR]      = MDL_PR,
        [LCK_CW]      = MDL_CW,
        [LCK_CR]      = MDL_CR,
        [LCK_NL]      = MDL_NL,
        [LCK_GROUP]   = MDL_GROUP
};

ldlm_mode_t mdt_dlm_lock_modes[] = {
        [MDL_MINMODE] = LCK_MINMODE,
        [MDL_EX]      = LCK_EX,
        [MDL_PW]      = LCK_PW,
        [MDL_PR]      = LCK_PR,
        [MDL_CW]      = LCK_CW,
        [MDL_CR]      = LCK_CR,
        [MDL_NL]      = LCK_NL,
        [MDL_GROUP]   = LCK_GROUP
};

static struct mdt_device *mdt_dev(struct lu_device *d);
static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);

static const struct lu_object_operations mdt_obj_ops;

/* Slab for MDT object allocation */
static struct kmem_cache *mdt_object_kmem;

static struct lu_kmem_descr mdt_caches[] = {
        {
                .ckd_cache = &mdt_object_kmem,
                .ckd_name  = "mdt_obj",
                .ckd_size  = sizeof(struct mdt_object)
        },
        {
                .ckd_cache = NULL
        }
};

int mdt_get_disposition(struct ldlm_reply *rep, int flag)
{
        if (rep == NULL)
                return 0;
        return (rep->lock_policy_res1 & flag);
}

void mdt_clear_disposition(struct mdt_thread_info *info,
                           struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata &= ~flag;
        if (rep)
                rep->lock_policy_res1 &= ~flag;
}

void mdt_set_disposition(struct mdt_thread_info *info,
                         struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata |= flag;
        if (rep)
                rep->lock_policy_res1 |= flag;
}

void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
{
        lh->mlh_pdo_hash = 0;
        lh->mlh_reg_mode = lm;
        lh->mlh_rreg_mode = lm;
        lh->mlh_type = MDT_REG_LOCK;
}

void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
                       const char *name, int namelen)
{
        lh->mlh_reg_mode = lm;
        lh->mlh_rreg_mode = lm;
        lh->mlh_type = MDT_PDO_LOCK;

        if (name != NULL && (name[0] != '\0')) {
                LASSERT(namelen > 0);
                lh->mlh_pdo_hash = full_name_hash(name, namelen);
                /* XXX Workaround for LU-2856
                 * Zero is a valid return value of full_name_hash, but
                 * several users of mlh_pdo_hash assume a non-zero hash
                 * value. We therefore map zero onto an arbitrary, but
                 * consistent value (1) to avoid problems further down
                 * the road. */
                if (unlikely(!lh->mlh_pdo_hash))
                        lh->mlh_pdo_hash = 1;
        } else {
                LASSERT(namelen == 0);
                lh->mlh_pdo_hash = 0ull;
        }
}

static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
                              struct mdt_lock_handle *lh)
{
        mdl_mode_t mode;
        ENTRY;

        /*
         * Any directory access needs a couple of locks:
         *
         * 1) on the part of the directory we are going to look up or modify;
         *
         * 2) on the whole directory, to protect it from concurrent splitting
         * and/or to flush the client's cache for readdir().
         *
         * So, for a given mode and object this routine decides what lock mode
         * to use for lock #2:
         *
         * 1) if the caller is going to look up in the directory, then we only
         * need to protect the directory from being split - LCK_CR;
         *
         * 2) if the caller is going to modify the directory, then we need to
         * protect it from being split and to flush the cache - LCK_CW;
         *
         * 3) if the caller is going to modify the directory and that directory
         * seems ready for splitting, then we need to protect it from any type
         * of access (lookup/modify/split) - LCK_EX. --bzzz
         */

        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);

        /*
         * Ask the underlying level its opinion about the preferable PDO lock
         * mode, given the access type passed as the regular lock mode:
         *
         * - MDL_MINMODE means that the lower layer does not want to specify
         * a lock mode;
         *
         * - MDL_NL means that no PDO lock should be taken. This is used in
         * some cases; say, for non-splittable directories there is no need
         * to use PDO locks at all.
         */
        mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
                             mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));

        if (mode != MDL_MINMODE) {
                lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
        } else {
                /*
                 * The lower layer does not want to specify a locking mode.
                 * We do it ourselves. No special protection is needed, just
                 * flush the client's cache on modification and allow
                 * concurrent modification.
                 */
                switch (lh->mlh_reg_mode) {
                case LCK_EX:
                        lh->mlh_pdo_mode = LCK_EX;
                        break;
                case LCK_PR:
                        lh->mlh_pdo_mode = LCK_CR;
                        break;
                case LCK_PW:
                        lh->mlh_pdo_mode = LCK_CW;
                        break;
                default:
                        CERROR("Unexpected lock mode (0x%x)\n",
                               (int)lh->mlh_reg_mode);
                        LBUG();
                }
        }

        LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
        EXIT;
}

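/**
 * MDS_GETSTATUS handler: return the FID of the filesystem root to the
 * client and, when MDS capabilities are enabled for this export, pack a
 * default MDS capability for the root into the reply as well.
 */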
int mdt_getstatus(struct mdt_thread_info *info)
{
        struct mdt_device *mdt = info->mti_mdt;
        struct mdt_body *repbody;
        int rc;
        ENTRY;

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        repbody->fid1 = mdt->mdt_md_root_fid;
        repbody->valid |= OBD_MD_FLID;

        if (mdt->mdt_opts.mo_mds_capa &&
            exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
                struct mdt_object *root;
                struct lustre_capa *capa;

                root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
                                 0);
                mdt_object_put(info->mti_env, root);
                if (rc == 0)
                        repbody->valid |= OBD_MD_FLMDSCAPA;
        }

        RETURN(rc);
}

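/**
 * MDS_STATFS handler: return filesystem usage. The result is cached in
 * the mdt_device and only refreshed from the lower layers once it is
 * older than OBD_STATFS_CACHE_SECONDS, so frequent statfs requests do
 * not hammer the backend.
 */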
int mdt_statfs(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct md_device *next = info->mti_mdt->mdt_child;
        struct ptlrpc_service_part *svcpt;
        struct obd_statfs *osfs;
        int rc;

        ENTRY;

        svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;

        /* This will trigger a watchdog timeout */
        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
                         (MDT_SERVICE_WATCHDOG_FACTOR *
                          at_get(&svcpt->scp_at_estimate)) + 1);

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
                RETURN(err_serious(-ENOMEM));

        osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
        if (!osfs)
                RETURN(-EPROTO);

        /** statfs information is cached in the mdt_device */
        if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
                               cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
                /** statfs data is too old, get an up-to-date copy */
                rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
                if (rc)
                        RETURN(rc);
                spin_lock(&info->mti_mdt->mdt_osfs_lock);
                info->mti_mdt->mdt_osfs = *osfs;
                info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
                spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        } else {
                /** use cached statfs data */
                spin_lock(&info->mti_mdt->mdt_osfs_lock);
                *osfs = info->mti_mdt->mdt_osfs;
                spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        }

        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_STATFS);

        RETURN(rc);
}

/**
 * Pack SOM attributes into the reply.
 * Call under a DLM UPDATE lock.
 */
static void mdt_pack_size2body(struct mdt_thread_info *info,
                               struct mdt_object *mo)
{
        struct mdt_body *b;
        struct md_attr *ma = &info->mti_attr;

        LASSERT(ma->ma_attr.la_valid & LA_MODE);
        b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        /* Check if Size-on-MDS is supported, if this is a regular file,
         * if SOM is enabled on the object, and if the SOM cache exists
         * and is valid. Otherwise do not pack Size-on-MDS attributes
         * into the reply. */
        if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
            !S_ISREG(ma->ma_attr.la_mode) ||
            !mdt_object_is_som_enabled(mo) ||
            !(ma->ma_valid & MA_SOM))
                return;

        b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        b->size = ma->ma_som->msd_size;
        b->blocks = ma->ma_som->msd_blocks;
}

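/**
 * Copy attributes from @attr into the wire body @b and set the matching
 * OBD_MD_* valid flags. Size and blocks are packed only when the MDS-held
 * values are authoritative: for non-regular files, for regular files with
 * no OST objects, and for released (HSM-archived) files.
 */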
void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
                        const struct lu_attr *attr, const struct lu_fid *fid)
{
        struct md_attr *ma = &info->mti_attr;

        LASSERT(ma->ma_valid & MA_INODE);

        b->atime = attr->la_atime;
        b->mtime = attr->la_mtime;
        b->ctime = attr->la_ctime;
        b->mode = attr->la_mode;
        b->size = attr->la_size;
        b->blocks = attr->la_blocks;
        b->uid = attr->la_uid;
        b->gid = attr->la_gid;
        b->flags = attr->la_flags;
        b->nlink = attr->la_nlink;
        b->rdev = attr->la_rdev;

        /* XXX should pack the reply body according to lu_valid */
        b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID |
                    OBD_MD_FLGID | OBD_MD_FLTYPE |
                    OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
                    OBD_MD_FLATIME | OBD_MD_FLMTIME;

        if (!S_ISREG(attr->la_mode)) {
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
        } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
                /* means no objects are allocated on OSTs */
                LASSERT(!(ma->ma_valid & MA_LOV));
                /* just ignore blocks occupied by extended attributes on MDS */
                b->blocks = 0;
                /* if no object is allocated on OSTs, the size held on the
                 * MDS is valid. b=22272 */
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        } else if ((ma->ma_valid & MA_LOV) && ma->ma_lmm &&
                   (ma->ma_lmm->lmm_pattern & LOV_PATTERN_F_RELEASED)) {
                /* A released file stores its size on the MDS. */
                b->blocks = 0;
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        }

        if (fid) {
                b->fid1 = *fid;
                b->valid |= OBD_MD_FLID;
                CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
                       PFID(fid), b->nlink, b->mode, b->size);
        }

        if (info)
                mdt_body_reverse_idmap(info, b);

        if (b->valid & OBD_MD_FLSIZE)
                CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
                       PFID(fid), (unsigned long long)b->size);
}

static inline int mdt_body_has_lov(const struct lu_attr *la,
                                   const struct mdt_body *body)
{
        return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
                (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA)));
}

void mdt_client_compatibility(struct mdt_thread_info *info)
{
        struct mdt_body *body;
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_export *exp = req->rq_export;
        struct md_attr *ma = &info->mti_attr;
        struct lu_attr *la = &ma->ma_attr;
        ENTRY;

        if (exp_connect_layout(exp))
                /* the client can deal with 16-bit lmm_stripe_count */
                RETURN_EXIT;

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        if (!mdt_body_has_lov(la, body))
                RETURN_EXIT;

        /* now we have a reply with a LOV EA for a client that is not
         * compatible with the layout lock, so we have to clear the layout
         * generation number */
        if (S_ISREG(la->la_mode))
                ma->ma_lmm->lmm_layout_gen = 0;
        EXIT;
}

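/**
 * Read a potentially large xattr @name of @o into the per-thread
 * mti_big_lmm buffer, growing the buffer (rounded up to a power of two)
 * when the current one is too small. Returns the xattr size or a
 * negative errno; on success the value is available via info->mti_buf.
 */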
static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
                             const char *name)
{
        const struct lu_env *env = info->mti_env;
        int rc;
        ENTRY;

        LASSERT(info->mti_big_lmm_used == 0);
        rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
        if (rc < 0)
                RETURN(rc);

        /* big_lmm may need to be grown */
        if (info->mti_big_lmmsize < rc) {
                int size = size_roundup_power2(rc);

                if (info->mti_big_lmmsize > 0) {
                        /* free old buffer */
                        LASSERT(info->mti_big_lmm);
                        OBD_FREE_LARGE(info->mti_big_lmm,
                                       info->mti_big_lmmsize);
                        info->mti_big_lmm = NULL;
                        info->mti_big_lmmsize = 0;
                }

                OBD_ALLOC_LARGE(info->mti_big_lmm, size);
                if (info->mti_big_lmm == NULL)
                        RETURN(-ENOMEM);
                info->mti_big_lmmsize = size;
        }
        LASSERT(info->mti_big_lmmsize >= rc);

        info->mti_buf.lb_buf = info->mti_big_lmm;
        info->mti_buf.lb_len = info->mti_big_lmmsize;
        rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);

        RETURN(rc);
}

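/**
 * Fetch the LOV EA of @o into ma->ma_lmm. If the preallocated reply
 * buffer is too small (-ERANGE), retry via mdt_big_xattr_get() and raise
 * mdt_max_mdsize so that clients learn about the larger EA size.
 */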
int mdt_attr_get_lov(struct mdt_thread_info *info,
                     struct mdt_object *o, struct md_attr *ma)
{
        struct md_object *next = mdt_object_child(o);
        struct lu_buf *buf = &info->mti_buf;
        int rc;

        buf->lb_buf = ma->ma_lmm;
        buf->lb_len = ma->ma_lmm_size;
        rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
        if (rc > 0) {
                ma->ma_lmm_size = rc;
                ma->ma_valid |= MA_LOV;
                rc = 0;
        } else if (rc == -ENODATA) {
                /* no LOV EA */
                rc = 0;
        } else if (rc == -ERANGE) {
                rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
                if (rc > 0) {
                        info->mti_big_lmm_used = 1;
                        ma->ma_valid |= MA_LOV;
                        ma->ma_lmm = info->mti_big_lmm;
                        ma->ma_lmm_size = rc;
                        /* update mdt_max_mdsize so all clients
                         * will be aware of it */
                        if (info->mti_mdt->mdt_max_mdsize < rc)
                                info->mti_mdt->mdt_max_mdsize = rc;
                        rc = 0;
                }
        }

        return rc;
}

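/**
 * Extract the parent FID of @o from the first entry of its link EA (the
 * list of parent/name pairs maintained for hardlinks). The link EA is
 * byte-swapped in place if it was written with the opposite endianness.
 */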
int mdt_attr_get_pfid(struct mdt_thread_info *info,
                      struct mdt_object *o, struct lu_fid *pfid)
{
        struct lu_buf *buf = &info->mti_buf;
        struct link_ea_header *leh;
        struct link_ea_entry *lee;
        int rc;
        ENTRY;

        buf->lb_buf = info->mti_big_lmm;
        buf->lb_len = info->mti_big_lmmsize;
        rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
                          buf, XATTR_NAME_LINK);
        /* ignore errors, MA_PFID won't be set and it is
         * up to the caller to treat this as an error */
        if (rc == -ERANGE || buf->lb_len == 0) {
                rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
                buf->lb_buf = info->mti_big_lmm;
                buf->lb_len = info->mti_big_lmmsize;
        }

        if (rc < 0)
                RETURN(rc);
        if (rc < sizeof(*leh)) {
                CERROR("short LinkEA on "DFID": rc = %d\n",
                       PFID(mdt_object_fid(o)), rc);
                RETURN(-ENODATA);
        }

        leh = (struct link_ea_header *) buf->lb_buf;
        lee = (struct link_ea_entry *)(leh + 1);
        if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
                leh->leh_magic = LINK_EA_MAGIC;
                leh->leh_reccount = __swab32(leh->leh_reccount);
                leh->leh_len = __swab64(leh->leh_len);
        }
        if (leh->leh_magic != LINK_EA_MAGIC)
                RETURN(-EINVAL);
        if (leh->leh_reccount == 0)
                RETURN(-ENODATA);

        memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
        fid_be_to_cpu(pfid, pfid);

        RETURN(0);
}

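/**
 * Gather all attributes listed in ma->ma_need in a single call: basic
 * inode attributes plus, where applicable, the parent FID, LOV/LMV EAs,
 * SOM and HSM state, and the default ACL. Each piece successfully read
 * is flagged in ma->ma_valid.
 */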
int mdt_attr_get_complex(struct mdt_thread_info *info,
                         struct mdt_object *o, struct md_attr *ma)
{
        const struct lu_env *env = info->mti_env;
        struct md_object *next = mdt_object_child(o);
        struct lu_buf *buf = &info->mti_buf;
        u32 mode = lu_object_attr(&next->mo_lu);
        int need = ma->ma_need;
        int rc = 0, rc2;
        ENTRY;

        ma->ma_valid = 0;

        if (need & MA_INODE) {
                ma->ma_need = MA_INODE;
                rc = mo_attr_get(env, next, ma);
                if (rc)
                        GOTO(out, rc);
                ma->ma_valid |= MA_INODE;
        }

        if (need & MA_PFID) {
                rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
                if (rc == 0)
                        ma->ma_valid |= MA_PFID;
                /* ignore this error, the parent fid is not mandatory */
                rc = 0;
        }

        if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
                rc = mdt_attr_get_lov(info, o, ma);
                if (rc)
                        GOTO(out, rc);
        }

        if (need & MA_LMV && S_ISDIR(mode)) {
                buf->lb_buf = ma->ma_lmv;
                buf->lb_len = ma->ma_lmv_size;
                rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
                if (rc2 > 0) {
                        ma->ma_lmv_size = rc2;
                        ma->ma_valid |= MA_LMV;
                } else if (rc2 == -ENODATA) {
                        /* no LMV EA */
                        ma->ma_lmv_size = 0;
                } else
                        GOTO(out, rc = rc2);
        }

        if (need & MA_SOM && S_ISREG(mode)) {
                buf->lb_buf = info->mti_xattr_buf;
                buf->lb_len = sizeof(info->mti_xattr_buf);
                CLASSERT(sizeof(struct som_attrs) <=
                         sizeof(info->mti_xattr_buf));
                rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
                rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
                if (rc2 == 0)
                        ma->ma_valid |= MA_SOM;
                else if (rc2 < 0 && rc2 != -ENODATA)
                        GOTO(out, rc = rc2);
        }

        if (need & MA_HSM && S_ISREG(mode)) {
                buf->lb_buf = info->mti_xattr_buf;
                buf->lb_len = sizeof(info->mti_xattr_buf);
                CLASSERT(sizeof(struct hsm_attrs) <=
                         sizeof(info->mti_xattr_buf));
                rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
                rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
                if (rc2 == 0)
                        ma->ma_valid |= MA_HSM;
                else if (rc2 < 0 && rc2 != -ENODATA)
                        GOTO(out, rc = rc2);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (need & MA_ACL_DEF && S_ISDIR(mode)) {
                buf->lb_buf = ma->ma_acl;
                buf->lb_len = ma->ma_acl_size;
                rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
                if (rc2 > 0) {
                        ma->ma_acl_size = rc2;
                        ma->ma_valid |= MA_ACL_DEF;
                } else if (rc2 == -ENODATA) {
                        /* no ACLs */
                        ma->ma_acl_size = 0;
                } else
                        GOTO(out, rc = rc2);
        }
#endif
out:
        CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
               rc, ma->ma_valid, ma->ma_lmm);
        RETURN(rc);
}

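/**
 * Core of the getattr path: fill the reply body for object @o, handling
 * remote (DNE) objects, LOV/LMV EA packing, symlink targets, ACLs,
 * remote permissions and capabilities. For the filesystem root without
 * its own LOV EA, the default striping is returned instead.
 */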
static int mdt_getattr_internal(struct mdt_thread_info *info,
                                struct mdt_object *o, int ma_need)
{
        struct md_object *next = mdt_object_child(o);
        const struct mdt_body *reqbody = info->mti_body;
        struct ptlrpc_request *req = mdt_info_req(info);
        struct md_attr *ma = &info->mti_attr;
        struct lu_attr *la = &ma->ma_attr;
        struct req_capsule *pill = info->mti_pill;
        const struct lu_env *env = info->mti_env;
        struct mdt_body *repbody;
        struct lu_buf *buffer = &info->mti_buf;
        int rc;
        int is_root;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        ma->ma_valid = 0;

        if (mdt_object_remote(o)) {
                /* This object is located on a remote node. */
                /* Return -EIO for an old client */
                if (!mdt_is_dne_client(req->rq_export))
                        GOTO(out, rc = -EIO);

                repbody->fid1 = *mdt_object_fid(o);
                repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
                GOTO(out, rc = 0);
        }

        buffer->lb_len = reqbody->eadatasize;
        if (buffer->lb_len > 0)
                buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
        else
                buffer->lb_buf = NULL;

        /* If it is a dir object and the client requires the MEA, then we
         * fetch the MEA instead of the LOV EA. */
        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_MEA) {
                /* Assumption: MDT_MD size is enough for lmv size. */
                ma->ma_lmv = buffer->lb_buf;
                ma->ma_lmv_size = buffer->lb_len;
                ma->ma_need = MA_LMV | MA_INODE;
        } else {
                ma->ma_lmm = buffer->lb_buf;
                ma->ma_lmm_size = buffer->lb_len;
                ma->ma_need = MA_LOV | MA_INODE | MA_HSM;
        }

        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_FLDIREA &&
            lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
                /* get default stripe info for this dir. */
                ma->ma_need |= MA_LOV_DEF;
        }
        ma->ma_need |= ma_need;
        if (ma->ma_need & MA_SOM)
                ma->ma_som = &info->mti_u.som.data;

        rc = mdt_attr_get_complex(info, o, ma);
        if (unlikely(rc)) {
                CERROR("%s: getattr error for "DFID": rc = %d\n",
                       mdt_obd_name(info->mti_mdt),
                       PFID(mdt_object_fid(o)), rc);
                RETURN(rc);
        }

        /* if the file is released, check if a restore is running */
        if ((ma->ma_valid & MA_HSM) && (ma->ma_hsm.mh_flags & HS_RELEASED) &&
            mdt_hsm_restore_is_running(info, mdt_object_fid(o))) {
                repbody->t_state = MS_RESTORE;
                repbody->valid |= OBD_MD_TSTATE;
        }

        is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);

        /* the Lustre protocol expects us to return the default striping
         * on the user-visible root if explicitly requested */
        if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
            (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
                struct lu_fid rootfid;
                struct mdt_object *root;
                struct mdt_device *mdt = info->mti_mdt;

                rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
                if (rc)
                        RETURN(rc);
                root = mdt_object_find(env, mdt, &rootfid);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));
                rc = mdt_attr_get_lov(info, root, ma);
                mdt_object_put(info->mti_env, root);
                if (unlikely(rc)) {
                        CERROR("%s: getattr error for "DFID": rc = %d\n",
                               mdt_obd_name(info->mti_mdt),
                               PFID(mdt_object_fid(o)), rc);
                        RETURN(rc);
                }
        }

        if (likely(ma->ma_valid & MA_INODE))
                mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
        else
                RETURN(-EFAULT);

        if (mdt_body_has_lov(la, reqbody)) {
                if (ma->ma_valid & MA_LOV) {
                        LASSERT(ma->ma_lmm_size);
                        mdt_dump_lmm(D_INFO, ma->ma_lmm);
                        repbody->eadatasize = ma->ma_lmm_size;
                        if (S_ISDIR(la->la_mode))
                                repbody->valid |= OBD_MD_FLDIREA;
                        else
                                repbody->valid |= OBD_MD_FLEASIZE;
                }
                if (ma->ma_valid & MA_LMV) {
                        LASSERT(S_ISDIR(la->la_mode));
                        repbody->eadatasize = ma->ma_lmv_size;
                        repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
                }
        } else if (S_ISLNK(la->la_mode) &&
                   reqbody->valid & OBD_MD_LINKNAME) {
                buffer->lb_buf = ma->ma_lmm;
                /* eadatasize from the client includes the NUL terminator,
                 * so there is no need to read it */
                buffer->lb_len = reqbody->eadatasize - 1;
                rc = mo_readlink(env, next, buffer);
                if (unlikely(rc <= 0)) {
                        CERROR("%s: readlink failed for "DFID": rc = %d\n",
                               mdt_obd_name(info->mti_mdt),
                               PFID(mdt_object_fid(o)), rc);
                        rc = -EFAULT;
                } else {
                        int print_limit = min_t(int, PAGE_CACHE_SIZE - 128, rc);

                        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
                                rc -= 2;
                        repbody->valid |= OBD_MD_LINKNAME;
                        /* we need to report the size back including the NUL
                         * terminator because the client expects it */
                        repbody->eadatasize = rc + 1;
                        if (repbody->eadatasize != reqbody->eadatasize)
                                CDEBUG(D_INODE, "%s: Read shorter symlink %d "
                                       "on "DFID ", expected %d\n",
                                       mdt_obd_name(info->mti_mdt),
                                       rc, PFID(mdt_object_fid(o)),
                                       reqbody->eadatasize - 1);
                        ((char *)ma->ma_lmm)[rc] = 0;

                        /* If the total CDEBUG() size is larger than a page, it
                         * will print a warning to the console, avoid this by
                         * printing just the last part of the symlink. */
                        CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
                               print_limit < rc ? "..." : "", print_limit,
                               (char *)ma->ma_lmm + rc - print_limit, rc);
                        rc = 0;
                }
        }

        if (reqbody->valid & OBD_MD_FLMODEASIZE) {
                repbody->max_cookiesize = 0;
                repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
                repbody->valid |= OBD_MD_FLMODEASIZE;
                CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
                       "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
                       repbody->max_cookiesize);
        }

        if (exp_connect_rmtclient(info->mti_exp) &&
            reqbody->valid & OBD_MD_FLRMTPERM) {
                void *buf = req_capsule_server_get(pill, &RMF_ACL);

                /* mdt_getattr_lock only */
                rc = mdt_pack_remote_perm(info, o, buf);
                if (rc) {
                        repbody->valid &= ~OBD_MD_FLRMTPERM;
                        repbody->aclsize = 0;
                        RETURN(rc);
                } else {
                        repbody->valid |= OBD_MD_FLRMTPERM;
                        repbody->aclsize = sizeof(struct mdt_remote_perm);
                }
        }
#ifdef CONFIG_FS_POSIX_ACL
        else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
                 (reqbody->valid & OBD_MD_FLACL)) {
                buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
                buffer->lb_len = req_capsule_get_size(pill,
                                                      &RMF_ACL, RCL_SERVER);
                if (buffer->lb_len > 0) {
                        rc = mo_xattr_get(env, next, buffer,
                                          XATTR_NAME_ACL_ACCESS);
                        if (rc < 0) {
                                if (rc == -ENODATA) {
                                        repbody->aclsize = 0;
                                        repbody->valid |= OBD_MD_FLACL;
                                        rc = 0;
                                } else if (rc == -EOPNOTSUPP) {
                                        rc = 0;
                                } else {
                                        CERROR("%s: unable to read "DFID
                                               " ACL: rc = %d\n",
                                               mdt_obd_name(info->mti_mdt),
                                               PFID(mdt_object_fid(o)), rc);
                                }
                        } else {
                                repbody->aclsize = rc;
                                repbody->valid |= OBD_MD_FLACL;
                                rc = 0;
                        }
                }
        }
#endif

        if (reqbody->valid & OBD_MD_FLMDSCAPA &&
            info->mti_mdt->mdt_opts.mo_mds_capa &&
            exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
                struct lustre_capa *capa;

                capa = req_capsule_server_get(pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(env, next, capa, 0);
                if (rc)
                        RETURN(rc);
                repbody->valid |= OBD_MD_FLMDSCAPA;
        }

        EXIT;
out:
        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_GETATTR);

        return rc;
}

static int mdt_renew_capa(struct mdt_thread_info *info)
{
        struct mdt_object *obj = info->mti_object;
        struct mdt_body *body;
        struct lustre_capa *capa, *c;
        int rc;
        ENTRY;

        /* if the object doesn't exist, or the server has disabled
         * capabilities, return directly; the client will see that the
         * OBD_MD_FLOSSCAPA flag is not set in body->valid and will ask
         * for a capability in lookup */
        if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
            !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
                RETURN(rc = err_serious(-ENOENT));

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(body != NULL);

        c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
        LASSERT(c);

        capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
        LASSERT(capa);

        *capa = *c;
        rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
        if (rc == 0)
                body->valid |= OBD_MD_FLOSSCAPA;
        RETURN(rc);
}

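/**
 * MDS_GETATTR handler: getattr by FID on the already-resolved object in
 * info->mti_object. Requests with OBD_MD_FLOSSCAPA set are serviced as
 * OSS capability renewals instead.
 */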
int mdt_getattr(struct mdt_thread_info *info)
{
        struct mdt_object *obj = info->mti_object;
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body *reqbody;
        struct mdt_body *repbody;
        __u32 mode;
        int rc, rc2;
        ENTRY;

        reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
        LASSERT(reqbody);

        if (reqbody->valid & OBD_MD_FLOSSCAPA) {
                rc = req_capsule_server_pack(pill);
                if (unlikely(rc))
                        RETURN(err_serious(rc));
                rc = mdt_renew_capa(info);
                GOTO(out_shrink, rc);
        }

        LASSERT(obj != NULL);
        LASSERT(lu_object_assert_exists(&obj->mot_obj));

        mode = lu_object_attr(&obj->mot_obj);

        /* old clients may not report needed easize, use max value then */
        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                             reqbody->eadatasize == 0 ?
                             info->mti_mdt->mdt_max_mdsize :
                             reqbody->eadatasize);

        rc = req_capsule_server_pack(pill);
        if (unlikely(rc != 0))
                RETURN(err_serious(rc));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        if (reqbody->valid & OBD_MD_FLRMTPERM)
                rc = mdt_init_ucred(info, reqbody);
        else
                rc = mdt_check_ucred(info);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);

        /*
         * Don't check capability at all, because rename might getattr for
         * remote obj, and at that time no capability is available.
         */
        mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, obj, 0);
        if (reqbody->valid & OBD_MD_FLRMTPERM)
                mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}

int mdt_is_subdir(struct mdt_thread_info *info)
{
        struct mdt_object *o = info->mti_object;
        struct req_capsule *pill = info->mti_pill;
        const struct mdt_body *body = info->mti_body;
        struct mdt_body *repbody;
        int rc;
        ENTRY;

        LASSERT(o != NULL);

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        /*
         * We save the last checked parent fid to @repbody->fid1 for the
         * remote directory case.
         */
        LASSERT(fid_is_sane(&body->fid2));
        LASSERT(mdt_object_exists(o) && !mdt_object_remote(o));
        rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
                           &body->fid2, &repbody->fid1);
        if (rc == 0 || rc == -EREMOTE)
                repbody->valid |= OBD_MD_FLID;

        RETURN(rc);
}

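/**
 * MDS_SWAP_LAYOUTS handler: atomically exchange the layouts (LOV EAs) of
 * two files. Requires a layout-lock capable client and write permission
 * on both files; each object is held under an EX layout lock for the
 * duration of the swap.
 */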
int mdt_swap_layouts(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_export *exp = req->rq_export;
        struct mdt_object *o1, *o2, *o;
        struct mdt_lock_handle *lh1, *lh2;
        struct mdc_swap_layouts *msl;
        int rc;
        ENTRY;

        /* client does not support the layout lock, so layout swapping
         * is disabled.
         * FIXME: there is a problem for old clients which don't support
         * layout lock yet. If those clients have already opened the file
         * they won't be notified at all so that old layout may still be
         * used to do IO. This can be fixed after file release is landed by
         * doing exclusive open and taking full EX ibits lock. - Jinshan */
        if (!exp_connect_layout(exp))
                RETURN(-EOPNOTSUPP);

        if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
                mdt_set_capainfo(info, 0, &info->mti_body->fid1,
                                 req_capsule_client_get(info->mti_pill,
                                                        &RMF_CAPA1));

        if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
                mdt_set_capainfo(info, 1, &info->mti_body->fid2,
                                 req_capsule_client_get(info->mti_pill,
                                                        &RMF_CAPA2));

        o1 = info->mti_object;
        o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
                                 &info->mti_body->fid2);
        if (IS_ERR(o))
                GOTO(out, rc = PTR_ERR(o));

        if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote object */
                GOTO(put, rc = -ENOENT);

        rc = lu_fid_cmp(&info->mti_body->fid1, &info->mti_body->fid2);
        if (unlikely(rc == 0)) /* same file, you kidding me? no-op. */
                GOTO(put, rc = -EPERM);

        if (rc < 0)
                swap(o1, o2);

        /* permission check. Make sure the calling process has permission
         * to write both files. */
        rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
                           MAY_WRITE);
        if (rc < 0)
                GOTO(put, rc);

        rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
                           MAY_WRITE);
        if (rc < 0)
                GOTO(put, rc);

        msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
        if (msl == NULL)
                GOTO(put, rc = -EPROTO);

        lh1 = &info->mti_lh[MDT_LH_NEW];
        mdt_lock_reg_init(lh1, LCK_EX);
        lh2 = &info->mti_lh[MDT_LH_OLD];
        mdt_lock_reg_init(lh2, LCK_EX);

        rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT,
                             MDT_LOCAL_LOCK);
        if (rc < 0)
                GOTO(put, rc);

        rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT,
                             MDT_LOCAL_LOCK);
        if (rc < 0)
                GOTO(unlock1, rc);

        rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
                             mdt_object_child(o2), msl->msl_flags);
        EXIT;

        mdt_object_unlock(info, o2, lh2, rc);
unlock1:
        mdt_object_unlock(info, o1, lh1, rc);
put:
        mdt_object_put(info->mti_env, o);
out:
        RETURN(rc);
}

static int mdt_raw_lookup(struct mdt_thread_info *info,
                          struct mdt_object *parent,
                          const struct lu_name *lname,
                          struct ldlm_reply *ldlm_rep)
{
        struct md_object *next = mdt_object_child(info->mti_object);
        const struct mdt_body *reqbody = info->mti_body;
        struct lu_fid *child_fid = &info->mti_tmp_fid1;
        struct mdt_body *repbody;
        int rc;
        ENTRY;

        if (reqbody->valid != OBD_MD_FLID)
                RETURN(0);

        LASSERT(!info->mti_cross_ref);

        /* Only got the fid of this obj by name */
        fid_zero(child_fid);
        rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                        &info->mti_spec);
#if 0
        /* XXX is raw_lookup possible as intent operation? */
        if (rc != 0) {
                if (rc == -ENOENT)
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
                RETURN(rc);
        } else
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
#else
        if (rc == 0) {
                repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
                repbody->fid1 = *child_fid;
                repbody->valid = OBD_MD_FLID;
        }
#endif
        RETURN(1);
}

/*
 * UPDATE lock should be taken against the parent, and released before exit;
 * the child_bits lock should be taken against the child and returned back:
 *            (1) a normal request releases the child lock;
 *            (2) an intent request grants the lock to the client.
 */
static int mdt_getattr_name_lock(struct mdt_thread_info *info,
                                 struct mdt_lock_handle *lhc,
                                 __u64 child_bits,
                                 struct ldlm_reply *ldlm_rep)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct mdt_body *reqbody = NULL;
        struct mdt_object *parent = info->mti_object;
        struct mdt_object *child;
        struct md_object *next = mdt_object_child(parent);
        struct lu_fid *child_fid = &info->mti_tmp_fid1;
        struct lu_name *lname = NULL;
        const char *name = NULL;
        int namelen = 0;
        struct mdt_lock_handle *lhp = NULL;
        struct ldlm_lock *lock;
        struct ldlm_res_id *res_id;
        int is_resent;
        int ma_need = 0;
        int rc;
        ENTRY;

        is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
        LASSERT(ergo(is_resent,
                     lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));

        LASSERT(parent != NULL);
        name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
        if (name == NULL)
                RETURN(err_serious(-EFAULT));

        namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
                                       RCL_CLIENT) - 1;
        if (!info->mti_cross_ref) {
                /*
                 * XXX: The check for "namelen == 0" is for getattr by fid
                 * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty
                 * name, that is, the name must contain at least one
                 * character and the terminating '\0'.
                 */
                if (namelen == 0) {
                        reqbody = req_capsule_client_get(info->mti_pill,
                                                         &RMF_MDT_BODY);
                        if (unlikely(reqbody == NULL))
                                RETURN(err_serious(-EFAULT));

                        if (unlikely(!fid_is_sane(&reqbody->fid2)))
                                RETURN(err_serious(-EINVAL));

                        name = NULL;
                        CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
                               "ldlm_rep = %p\n",
                               PFID(mdt_object_fid(parent)),
                               PFID(&reqbody->fid2), ldlm_rep);
                } else {
                        lname = mdt_name(info->mti_env, (char *)name, namelen);
                        CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
                               "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
                               name, ldlm_rep);
                }
        }
        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);

        if (unlikely(!mdt_object_exists(parent))) {
                LU_OBJECT_DEBUG(D_INODE, info->mti_env,
                                &parent->mot_obj,
                                "Parent doesn't exist!\n");
                RETURN(-ESTALE);
        } else if (!info->mti_cross_ref) {
                LASSERTF(!mdt_object_remote(parent),
                         "Parent "DFID" is on a remote server\n",
                         PFID(mdt_object_fid(parent)));
        }
        if (lname) {
                rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
                if (rc != 0) {
                        if (rc > 0)
                                rc = 0;
                        RETURN(rc);
                }
        }

        if (info->mti_cross_ref) {
                /* Only getattr on the child. Parent is on another node. */
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
                child = parent;
                CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
                       "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);

                if (is_resent) {
                        /* Do not take a lock for the resent case. */
                        lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                        LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                                 lhc->mlh_reg_lh.cookie);
                        LASSERT(fid_res_name_eq(mdt_object_fid(child),
                                                &lock->l_resource->lr_name));
                        LDLM_LOCK_PUT(lock);
                        rc = 0;
                } else {
                        mdt_lock_handle_init(lhc);
                        mdt_lock_reg_init(lhc, LCK_PR);

                        /*
                         * Object's name is on another MDS, no lookup lock is
                         * needed here, but an update lock is.
                         */
                        child_bits &= ~MDS_INODELOCK_LOOKUP;
                        child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE;

                        rc = mdt_object_lock(info, child, lhc, child_bits,
                                             MDT_LOCAL_LOCK);
                }
                if (rc == 0) {
                        /* Finally, we can get attr for the child. */
                        mdt_set_capainfo(info, 0, mdt_object_fid(child),
                                         BYPASS_CAPA);
                        rc = mdt_getattr_internal(info, child, 0);
                        if (unlikely(rc != 0))
                                mdt_object_unlock(info, child, lhc, 1);
                }
                RETURN(rc);
        }

        if (lname) {
                /* step 1: lock the parent, but only if it is a directory */
                if (S_ISDIR(lu_object_attr(&parent->mot_obj))) {
                        lhp = &info->mti_lh[MDT_LH_PARENT];
                        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
                        rc = mdt_object_lock(info, parent, lhp,
                                             MDS_INODELOCK_UPDATE,
                                             MDT_LOCAL_LOCK);
                        if (unlikely(rc != 0))
                                RETURN(rc);
                }

                /* step 2: lookup the child's fid by name */
                fid_zero(child_fid);
                rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                                &info->mti_spec);

                if (rc != 0) {
                        if (rc == -ENOENT)
                                mdt_set_disposition(info, ldlm_rep,
                                                    DISP_LOOKUP_NEG);
                        GOTO(out_parent, rc);
                } else
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        } else {
                *child_fid = reqbody->fid2;
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        }

        /*
         * step 3: find the child object by fid & lock it,
         *         regardless of whether it is local or remote.
         */
        child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);

        if (unlikely(IS_ERR(child)))
                GOTO(out_parent, rc = PTR_ERR(child));
        if (is_resent) {
                /* Do not take a lock for the resent case. */
                lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                         lhc->mlh_reg_lh.cookie);

                res_id = &lock->l_resource->lr_name;
                if (!fid_res_name_eq(mdt_object_fid(child),
                                     &lock->l_resource->lr_name)) {
                        LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
                                                 &lock->l_resource->lr_name),
                                 "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                                 (unsigned long)res_id->name[0],
                                 (unsigned long)res_id->name[1],
                                 (unsigned long)res_id->name[2],
                                 PFID(mdt_object_fid(parent)));
                        CWARN("Although resent, the child lock was not "
                              "acquired; parent: "DFID" child: "DFID"\n",
                              PFID(mdt_object_fid(parent)),
                              PFID(mdt_object_fid(child)));
                        lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
                        LDLM_LOCK_PUT(lock);
                        GOTO(relock, 0);
                }
                LDLM_LOCK_PUT(lock);
                rc = 0;
        } else {
                bool try_layout = false;

relock:
                OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
                mdt_lock_handle_init(lhc);
                mdt_lock_reg_init(lhc, LCK_PR);

                if (!mdt_object_exists(child)) {
                        LU_OBJECT_DEBUG(D_INODE, info->mti_env,
                                        &child->mot_obj,
                                        "Object doesn't exist!\n");
                        GOTO(out_child, rc = -ENOENT);
                }

                if (!(child_bits & MDS_INODELOCK_UPDATE) &&
                    mdt_object_exists(child) && !mdt_object_remote(child)) {
                        struct md_attr *ma = &info->mti_attr;

                        ma->ma_valid = 0;
                        ma->ma_need = MA_INODE;
                        rc = mdt_attr_get_complex(info, child, ma);
                        if (unlikely(rc != 0))
                                GOTO(out_child, rc);

                        /* If the file has not been changed for some time, we
                         * return not only a LOOKUP lock, but also an UPDATE
                         * lock, and this might save us an RPC on a later
                         * STAT. For directories, it also lets negative
                         * dentries start working for this directory. */
                        if (ma->ma_valid & MA_INODE &&
                            ma->ma_attr.la_valid & LA_CTIME &&
                            info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
                                ma->ma_attr.la_ctime < cfs_time_current_sec())
                                child_bits |= MDS_INODELOCK_UPDATE;
                }

                /* the layout lock must be granted in a best-effort way
                 * for IT operations */
                LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
                if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
                    exp_connect_layout(info->mti_exp) &&
                    S_ISREG(lu_object_attr(&child->mot_obj)) &&
                    ldlm_rep != NULL) {
                        /* try to grant a layout lock for the regular file */
                        try_layout = true;
                }

                rc = 0;
                if (try_layout) {
                        child_bits |= MDS_INODELOCK_LAYOUT;
                        /* try the layout lock; it may fail to be granted due
                         * to contention at LOOKUP or UPDATE */
                        if (!mdt_object_lock_try(info, child, lhc, child_bits,
                                                 MDT_CROSS_LOCK)) {
                                child_bits &= ~MDS_INODELOCK_LAYOUT;
                                LASSERT(child_bits != 0);
                                rc = mdt_object_lock(info, child, lhc,
                                                     child_bits,
                                                     MDT_CROSS_LOCK);
                        }
                } else {
                        rc = mdt_object_lock(info, child, lhc, child_bits,
                                             MDT_CROSS_LOCK);
                }
                if (unlikely(rc != 0))
                        GOTO(out_child, rc);
        }

        lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
        /* Get MA_SOM attributes if an update lock is given. */
        if (lock != NULL &&
            lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
            S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
                ma_need = MA_SOM;

        /* finally, we can get attr for the child */
        mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, child, ma_need);
        if (unlikely(rc != 0)) {
                mdt_object_unlock(info, child, lhc, 1);
        } else if (lock) {
                /* Debugging code. */
                res_id = &lock->l_resource->lr_name;
                LDLM_DEBUG(lock, "Returning lock to client");
                LASSERTF(fid_res_name_eq(mdt_object_fid(child),
                                         &lock->l_resource->lr_name),
                         "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                         (unsigned long)res_id->name[0],
                         (unsigned long)res_id->name[1],
                         (unsigned long)res_id->name[2],
                         PFID(mdt_object_fid(child)));
                if (mdt_object_exists(child) && !mdt_object_remote(child))
                        mdt_pack_size2body(info, child);
        }
        if (lock)
                LDLM_LOCK_PUT(lock);

        EXIT;
out_child:
        mdt_object_put(info->mti_env, child);
out_parent:
        if (lhp)
                mdt_object_unlock(info, parent, lhp, 1);
        return rc;
}

/* normal handler: should release the child lock */
int mdt_getattr_name(struct mdt_thread_info *info)
{
        struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
        struct mdt_body *reqbody;
        struct mdt_body *repbody;
        int rc, rc2;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(reqbody != NULL);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        rc = mdt_init_ucred(info, reqbody);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
        if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
                ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
                lhc->mlh_reg_lh.cookie = 0;
        }
        mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}

static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                         void *karg, void *uarg);

int mdt_set_info(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        char *key;
        void *val;
        int keylen, vallen, rc = 0;
        ENTRY;

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(rc);

        key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info key");
                RETURN(-EFAULT);
        }

        keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);

        val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info val");
                RETURN(-EFAULT);
        }

        vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
                                      RCL_CLIENT);

        /* Swab any part of val you need to here */
        if (KEY_IS(KEY_READ_ONLY)) {
                req->rq_status = 0;
                lustre_msg_set_status(req->rq_repmsg, 0);

                spin_lock(&req->rq_export->exp_lock);
                if (*(__u32 *)val)
                        *exp_connect_flags_ptr(req->rq_export) |=
                                OBD_CONNECT_RDONLY;
                else
                        *exp_connect_flags_ptr(req->rq_export) &=
                                ~OBD_CONNECT_RDONLY;
                spin_unlock(&req->rq_export->exp_lock);

        } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
                struct changelog_setinfo *cs =
                        (struct changelog_setinfo *)val;
                if (vallen != sizeof(*cs)) {
                        CERROR("Bad changelog_clear setinfo size %d\n", vallen);
                        RETURN(-EINVAL);
                }
                if (ptlrpc_req_need_swab(req)) {
                        __swab64s(&cs->cs_recno);
                        __swab32s(&cs->cs_id);
                }

                rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
                                   vallen, val, NULL);
                lustre_msg_set_status(req->rq_repmsg, rc);
        } else {
                RETURN(-EINVAL);
        }
        RETURN(0);
}

/**
 * Top-level handler for MDT connection requests.
 */
int mdt_connect(struct mdt_thread_info *info)
{
        int rc;
        struct obd_connect_data *reply;
        struct obd_export *exp;
        struct ptlrpc_request *req = mdt_info_req(info);

        rc = target_handle_connect(req);
        if (rc != 0)
                return err_serious(rc);

        LASSERT(req->rq_export != NULL);
        info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
        rc = mdt_init_sec_level(info);
        if (rc != 0) {
                obd_disconnect(class_export_get(req->rq_export));
                return rc;
        }

        /* To avoid exposing partially initialized connection flags, changes up
         * to this point have been staged in reply->ocd_connect_flags. Now that
         * connection handling has completed successfully, atomically update
         * the connect flags in the shared export data structure. LU-1623 */
        reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
        exp = req->rq_export;
        spin_lock(&exp->exp_lock);
        *exp_connect_flags_ptr(exp) = reply->ocd_connect_flags;
        spin_unlock(&exp->exp_lock);

        rc = mdt_init_idmap(info);
        if (rc != 0)
                obd_disconnect(class_export_get(req->rq_export));

        return rc;
}

int mdt_disconnect(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        rc = target_handle_disconnect(mdt_info_req(info));
        if (rc)
                rc = err_serious(rc);
        RETURN(rc);
}

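/**
 * Transfer the pages collected in @rdpg to the client as a bulk PUT of
 * @nob bytes. For old clients without OBD_CONNECT_BRW_SIZE the transfer
 * size is capped at rdpg->rp_count.
 */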
static int mdt_sendpage(struct mdt_thread_info *info,
                        struct lu_rdpg *rdpg, int nob)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info *lwi = &info->mti_u.rdpg.mti_wait_info;
        int tmpcount;
        int tmpsize;
        int i;
        int rc;
        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
                                    MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(-ENOMEM);

        if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
                /* old client requires the reply size in its PAGE_SIZE,
                 * which is rdpg->rp_count */
                nob = rdpg->rp_count;

        for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
             i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
                ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
        }

        LASSERT(desc->bd_nob == nob);
        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk_pin(desc);
        RETURN(rc);
}

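/**
 * MDS_READPAGE handler: read directory pages. reqbody->size carries the
 * hash offset to start reading from and reqbody->nlink the number of
 * bytes to read; pages are filled by the lower layers and streamed back
 * through mdt_sendpage().
 */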
int mdt_readpage(struct mdt_thread_info *info)
{
        struct mdt_object *object = info->mti_object;
        struct lu_rdpg *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct mdt_body *reqbody;
        struct mdt_body *repbody;
        int rc;
        int i;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
                RETURN(err_serious(-ENOMEM));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL || repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /*
         * prepare @rdpg before calling lower layers and transfer itself. Here
         * reqbody->size contains the offset where to start reading and
         * reqbody->nlink contains the number of bytes to read.
         */
        rdpg->rp_hash = reqbody->size;
        if (rdpg->rp_hash != reqbody->size) {
                CERROR("Invalid hash: "LPX64" != "LPX64"\n",
                       rdpg->rp_hash, reqbody->size);
                RETURN(-EFAULT);
        }

        rdpg->rp_attrs = reqbody->mode;
        if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
                rdpg->rp_attrs |= LUDA_64BITHASH;
        rdpg->rp_count = min_t(unsigned int, reqbody->nlink,
                               exp_max_brw_size(info->mti_exp));
        rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
                          PAGE_CACHE_SHIFT;
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
        if (rdpg->rp_pages == NULL)
                RETURN(-ENOMEM);

        for (i = 0; i < rdpg->rp_npages; ++i) {
                rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(free_rdpg, rc = -ENOMEM);
        }

        /* call lower layers to fill allocated pages with directory data */
        rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
        if (rc < 0)
                GOTO(free_rdpg, rc);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg, rc);

        EXIT;
free_rdpg:

        for (i = 0; i < rdpg->rp_npages; i++)
                if (rdpg->rp_pages[i] != NULL)
                        __free_page(rdpg->rp_pages[i]);
        OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                RETURN(0);

        return rc;
}

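/**
 * Common entry point for reintegration (metadata-modifying) requests:
 * unpack the request, size and pack the reply capsule, set up the user
 * credentials, reconstruct resent requests, and dispatch the operation
 * to mdt_reint_rec().
 */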
static int mdt_reint_internal(struct mdt_thread_info *info,
                              struct mdt_lock_handle *lhc,
                              __u32 op)
{
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body *repbody;
        int rc = 0, rc2;
        ENTRY;

        rc = mdt_reint_unpack(info, op);
        if (rc != 0) {
                CERROR("Can't unpack reint, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        /* for replay (no_create) lmm is not needed, client has it already */
        if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                                     info->mti_rr.rr_eadatalen);

        /* llog cookies are always 0, the field is kept for compatibility */
        if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);

        rc = req_capsule_server_pack(pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
                repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
                LASSERT(repbody);
                repbody->eadatasize = 0;
                repbody->aclsize = 0;
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);

        /* for replay no cookie/lmm is needed, because the client already
         * has them */
        if (info->mti_spec.no_create)
                if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);

        rc = mdt_init_ucred_reint(info);
        if (rc)
                GOTO(out_shrink, rc);

        rc = mdt_fix_attr_ucred(info, op);
        if (rc != 0)
                GOTO(out_ucred, rc = err_serious(rc));

        if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
                rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
                GOTO(out_ucred, rc);
        }
        rc = mdt_reint_rec(info, lhc);
        EXIT;
out_ucred:
        mdt_exit_ucred(info);
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}

static long mdt_reint_opcode(struct mdt_thread_info *info,
                             const struct req_format **fmt)
{
        struct mdt_rec_reint *rec;
        long opc;

        rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
        if (rec != NULL) {
                opc = rec->rr_opcode;
                DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opt = %ld", opc);
                if (opc < REINT_MAX && fmt[opc] != NULL)
                        req_capsule_extend(info->mti_pill, fmt[opc]);
                else {
                        CERROR("%s: Unsupported opcode '%ld' from client '%s': "
                               "rc = %d\n", mdt_obd_name(info->mti_mdt), opc,
                               info->mti_mdt->mdt_ldlm_client->cli_name,
                               -EFAULT);
                        opc = err_serious(-EFAULT);
                }
        } else {
                opc = err_serious(-EFAULT);
        }
        return opc;
}

int mdt_reint(struct mdt_thread_info *info)
{
        long opc;
        int rc;

        static const struct req_format *reint_fmts[REINT_MAX] = {
                [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
                [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
                [REINT_LINK]     = &RQF_MDS_REINT_LINK,
                [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
                [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
                [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
                [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
                [REINT_RMENTRY]  = &RQF_MDS_REINT_UNLINK
        };

        ENTRY;

        opc = mdt_reint_opcode(info, reint_fmts);
        if (opc >= 0) {
                /*
                 * No lock possible here from client to pass it to reint code
                 * path.
                 */
                rc = mdt_reint_internal(info, NULL, opc);
        } else {
                rc = opc;
        }

        info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
        RETURN(rc);
}

/* this should sync the whole device */
static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
{
        struct dt_device *dt = mdt->mdt_bottom;
        int rc;
        ENTRY;

        rc = dt->dd_ops->dt_sync(env, dt);
        RETURN(rc);
}

/* this should sync this object */
static int mdt_object_sync(struct mdt_thread_info *info)
{
        struct md_object *next;
        int rc;
        ENTRY;

        if (!mdt_object_exists(info->mti_object)) {
                CWARN("Non-existing object "DFID"!\n",
                      PFID(mdt_object_fid(info->mti_object)));
                RETURN(-ESTALE);
        }
        next = mdt_object_child(info->mti_object);
        rc = mo_object_sync(info->mti_env, next);

        RETURN(rc);
}

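/**
 * MDS_SYNC handler: a zero FID in the request means "sync the whole
 * device"; otherwise the single object is synced and its up-to-date
 * attributes are returned in the reply body.
 */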
int mdt_sync(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body *body;
        int rc;
        ENTRY;

        /* The fid may be zero, so we req_capsule_set manually */
        req_capsule_set(pill, &RQF_MDS_SYNC);

        body = req_capsule_client_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                RETURN(err_serious(-EINVAL));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
                RETURN(err_serious(-ENOMEM));

        if (fid_seq(&body->fid1) == 0) {
                /* sync the whole device */
                rc = req_capsule_server_pack(pill);
                if (rc == 0)
                        rc = mdt_device_sync(info->mti_env, info->mti_mdt);
                else
                        rc = err_serious(rc);
        } else {
                /* sync an object */
                rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
                if (rc == 0) {
                        rc = mdt_object_sync(info);
                        if (rc == 0) {
                                const struct lu_fid *fid;
                                struct lu_attr *la = &info->mti_attr.ma_attr;

                                info->mti_attr.ma_need = MA_INODE;
                                info->mti_attr.ma_valid = 0;
                                rc = mdt_attr_get_complex(info, info->mti_object,
                                                          &info->mti_attr);
                                if (rc == 0) {
                                        body = req_capsule_server_get(pill,
                                                                &RMF_MDT_BODY);
                                        fid = mdt_object_fid(info->mti_object);
                                        mdt_pack_attr2body(info, body, la, fid);
                                }
                        }
                } else
                        rc = err_serious(rc);
        }
        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_SYNC);

        RETURN(rc);
}

/**
 * Quotacheck handler.
 * In-kernel quotacheck isn't supported any more.
 */
int mdt_quotacheck(struct mdt_thread_info *info)
{
        struct obd_quotactl *oqctl;
        int rc;
        ENTRY;

        oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(err_serious(rc));

        /* deprecated, not used any more */
        RETURN(-EOPNOTSUPP);
}

/**
 * Handle quota control requests to consult current usage/limit, but also
 * to configure quota enforcement.
 */
int mdt_quotactl(struct mdt_thread_info *info)
{
        struct obd_export *exp = info->mti_exp;
        struct req_capsule *pill = info->mti_pill;
        struct obd_quotactl *oqctl, *repoqc;
        int id, rc;
        struct lu_device *qmt = info->mti_mdt->mdt_qmt_dev;
        ENTRY;

        oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(pill);
        if (rc)
                RETURN(err_serious(rc));

        switch (oqctl->qc_cmd) {
        case Q_QUOTACHECK:
        case LUSTRE_Q_INVALIDATE:
        case LUSTRE_Q_FINVALIDATE:
        case Q_QUOTAON:
        case Q_QUOTAOFF:
        case Q_INITQUOTA:
                /* deprecated, not used any more */
                RETURN(-EOPNOTSUPP);
                /* master quotactl */
        case Q_GETINFO:
        case Q_SETINFO:
        case Q_SETQUOTA:
        case Q_GETQUOTA:
                if (qmt == NULL)
                        RETURN(-EOPNOTSUPP);
                /* slave quotactl */
        case Q_GETOINFO:
        case Q_GETOQUOTA:
                break;
        default:
                CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        /* map uid/gid for remote client */
        id = oqctl->qc_id;
        if (exp_connect_rmtclient(exp)) {
                struct lustre_idmap_table *idmap;

                idmap = mdt_req2med(mdt_info_req(info))->med_idmap;

                if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
                             oqctl->qc_cmd != Q_GETINFO))
                        RETURN(-EPERM);

                if (oqctl->qc_type == USRQUOTA)
                        id = lustre_idmap_lookup_uid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else if (oqctl->qc_type == GRPQUOTA)
                        id = lustre_idmap_lookup_gid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else
                        RETURN(-EINVAL);

                if (id == CFS_IDMAP_NOTFOUND) {
                        CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
                        RETURN(-EACCES);
                }
        }

        repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
        if (repoqc == NULL)
                RETURN(err_serious(-EFAULT));

        if (oqctl->qc_id != id)
                swap(oqctl->qc_id, id);

        switch (oqctl->qc_cmd) {
        case Q_GETINFO:
        case Q_SETINFO:
        case Q_SETQUOTA:
        case Q_GETQUOTA:
                /* forward quotactl request to QMT */
                rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
                break;

        case Q_GETOINFO:
        case Q_GETOQUOTA:
                /* slave quotactl */
                rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
                                   oqctl);
                break;

        default:
                CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        if (oqctl->qc_id != id)
                swap(oqctl->qc_id, id);

        *repoqc = *oqctl;
        RETURN(rc);
}

/**
 * OBD PING and other handlers.
 */
int mdt_obd_ping(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        req_capsule_set(info->mti_pill, &RQF_OBD_PING);

        rc = target_handle_ping(mdt_info_req(info));
        if (rc < 0)
                rc = err_serious(rc);
        RETURN(rc);
}

/*
 * OBD_IDX_READ handler
 */
int mdt_obd_idx_read(struct mdt_thread_info *info)
{
        struct mdt_device *mdt = info->mti_mdt;
        struct lu_rdpg *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct idx_info *req_ii, *rep_ii;
        int rc, i;
        ENTRY;

        memset(rdpg, 0, sizeof(*rdpg));
        req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);

        /* extract idx_info buffer from request & reply */
        req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
        if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(err_serious(rc));

        rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
        if (rep_ii == NULL)
                RETURN(err_serious(-EFAULT));
        rep_ii->ii_magic = IDX_INFO_MAGIC;

        /* extract hash to start with */
        rdpg->rp_hash = req_ii->ii_hash_start;

        /* extract requested attributes */
        rdpg->rp_attrs = req_ii->ii_attrs;

        /* check that fid packed in request is valid and supported */
        if (!fid_is_sane(&req_ii->ii_fid))
                RETURN(-EINVAL);
        rep_ii->ii_fid = req_ii->ii_fid;

        /* copy flags */
        rep_ii->ii_flags = req_ii->ii_flags;

        /* compute number of pages to allocate, ii_count is the number of 4KB
         * containers */
        if (req_ii->ii_count <= 0)
                GOTO(out, rc = -EFAULT);
        rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
                               exp_max_brw_size(info->mti_exp));
        rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
                          PAGE_CACHE_SHIFT;

        /* allocate pages to store the containers */
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        if (rdpg->rp_pages == NULL)
                GOTO(out, rc = -ENOMEM);
        for (i = 0; i < rdpg->rp_npages; i++) {
                rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(out, rc = -ENOMEM);
        }

        /* populate pages with key/record pairs */
        rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
        if (rc < 0)
                GOTO(out, rc);

        LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
                 "asked %d > %d\n", rc, rdpg->rp_count);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg, rc);

        GOTO(out, rc);
out:
        if (rdpg->rp_pages) {
                for (i = 0; i < rdpg->rp_npages; i++)
                        if (rdpg->rp_pages[i])
                                __free_page(rdpg->rp_pages[i]);
                OBD_FREE(rdpg->rp_pages,
                         rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        }
        return rc;
}

int mdt_obd_log_cancel(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}

int mdt_obd_qc_callback(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}

/** clone llog ctxt from child (mdd)
 * This allows remote llog (replicator) access.
 * We can either pass all llog RPCs (eg mdt_llog_create) on to child where the
 * context was originally set up, or we can handle them directly.
 * I choose the latter, but that means I need any llog
 * contexts set up by child to be accessible by the mdt. So we clone the
 * context into our context list here.
 */
static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
                               int idx)
{
        struct md_device *next = mdt->mdt_child;
        struct llog_ctxt *ctxt;
        int rc;

        if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
                return 0;

        rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
        if (rc || ctxt == NULL) {
                return 0;
        }

        rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
        if (rc)
                CERROR("Can't set mdt ctxt %d\n", rc);

        return rc;
}

static int mdt_llog_ctxt_unclone(const struct lu_env *env,
                                 struct mdt_device *mdt, int idx)
{
        struct llog_ctxt *ctxt;

        ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
        if (ctxt == NULL)
                return 0;
        /* Put once for the get we just did, and once for the clone */
        llog_ctxt_put(ctxt);
        llog_ctxt_put(ctxt);
        return 0;
}

int mdt_llog_create(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
        rc = llog_origin_handle_open(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

int mdt_llog_destroy(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
        rc = llog_origin_handle_destroy(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

int mdt_llog_read_header(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
        rc = llog_origin_handle_read_header(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

int mdt_llog_next_block(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
        rc = llog_origin_handle_next_block(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

int mdt_llog_prev_block(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
        rc = llog_origin_handle_prev_block(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

static struct ldlm_callback_suite cbs = {
        .lcs_completion = ldlm_server_completion_ast,
        .lcs_blocking   = ldlm_server_blocking_ast,
        .lcs_glimpse    = ldlm_server_glimpse_ast
};

int mdt_enqueue(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req;
        int rc;

        /*
         * info->mti_dlm_req already contains swapped and (if necessary)
         * converted dlm request.
         */
        LASSERT(info->mti_dlm_req != NULL);

        req = mdt_info_req(info);
        rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
                                  req, info->mti_dlm_req, &cbs);
        info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
        return rc ? err_serious(rc) : req->rq_status;
}

int mdt_convert(struct mdt_thread_info *info)
{
        int rc;
        struct ptlrpc_request *req;

        LASSERT(info->mti_dlm_req);
        req = mdt_info_req(info);
        rc = ldlm_handle_convert0(req, info->mti_dlm_req);
        return rc ? err_serious(rc) : req->rq_status;
}

int mdt_bl_callback(struct mdt_thread_info *info)
{
        CERROR("bl callbacks should not happen on MDS\n");
        LBUG();
        return err_serious(-EOPNOTSUPP);
}

int mdt_cp_callback(struct mdt_thread_info *info)
{
        CERROR("cp callbacks should not happen on MDS\n");
        LBUG();
        return err_serious(-EOPNOTSUPP);
}

2326 * sec context handlers
2328 int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2332 rc = mdt_handle_idmap(info);
2335 struct ptlrpc_request *req = mdt_info_req(info);
2338 opc = lustre_msg_get_opc(req->rq_reqmsg);
2339 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2340 sptlrpc_svc_ctx_invalidate(req);
2343 CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2349 * quota request handlers
2351 int mdt_quota_dqacq(struct mdt_thread_info *info)
2353 struct lu_device *qmt = info->mti_mdt->mdt_qmt_dev;
2358 RETURN(err_serious(-EOPNOTSUPP));
2360 rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2364 static struct mdt_object *mdt_obj(struct lu_object *o)
2366 LASSERT(lu_device_is_mdt(o->lo_dev));
2367 return container_of0(o, struct mdt_object, mot_obj);
2370 struct mdt_object *mdt_object_new(const struct lu_env *env,
2371 struct mdt_device *d,
2372 const struct lu_fid *f)
2374 struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2375 struct lu_object *o;
2376 struct mdt_object *m;
2379 CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2380 o = lu_object_find(env, &d->mdt_lu_dev, f, &conf);
2381 if (unlikely(IS_ERR(o)))
2382 m = (struct mdt_object *)o;
2388 struct mdt_object *mdt_object_find(const struct lu_env *env,
2389 struct mdt_device *d,
2390 const struct lu_fid *f)
2392 struct lu_object *o;
2393 struct mdt_object *m;
2396 CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2397 o = lu_object_find(env, &d->mdt_lu_dev, f, NULL);
2398 if (unlikely(IS_ERR(o)))
2399 m = (struct mdt_object *)o;
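/*
 * Usage sketch (editor's addition): every successful mdt_object_find()
 * must be balanced with mdt_object_put(). The surrounding function name
 * is an assumption for illustration.
 */
#if 0
static int example_lookup(const struct lu_env *env, struct mdt_device *d,
                          const struct lu_fid *fid)
{
        struct mdt_object *o;

        o = mdt_object_find(env, d, fid);
        if (IS_ERR(o))
                return PTR_ERR(o);

        /* ... operate on the object ... */

        mdt_object_put(env, o);
        return 0;
}
#endif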
2407 * Asynchronous commit for mdt device.
2409 * Pass the asynchronous commit call down the MDS stack.
2411 * \param env environment
2412 * \param mdt the mdt device
2414 static void mdt_device_commit_async(const struct lu_env *env,
2415 struct mdt_device *mdt)
2417 struct dt_device *dt = mdt->mdt_bottom;
2420 rc = dt->dd_ops->dt_commit_async(env, dt);
2421 if (unlikely(rc != 0))
2422 CWARN("async commit start failed with rc = %d\n", rc);
2426 * Mark the lock as "synchronous".
2428 * Mark the lock to defer transaction commit until unlock time.
2430 * \param lock the lock to mark as "synchronous"
2432 * \see mdt_is_lock_sync
2433 * \see mdt_save_lock
2435 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2437 lock->l_ast_data = (void*)1;
2441 * Check whether the lock is "synchronous" or not.
2443 * \param lock the lock to check
2444 * \retval 1 the lock is "synchronous"
2445 * \retval 0 the lock isn't "synchronous"
2447 * \see mdt_set_lock_sync
2448 * \see mdt_save_lock
2450 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2452 return lock->l_ast_data != NULL;
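/*
 * Sketch (editor's addition): l_ast_data doubles as a boolean flag here,
 * so the pair above supports the following pattern; mdt_save_lock()
 * checks the flag at unlock time and starts an asynchronous commit. The
 * triggering condition below is an assumption for illustration.
 */
#if 0
        if (conflict_with_other_client)         /* assumed condition */
                mdt_set_lock_sync(lock);        /* l_ast_data = (void *)1 */

        /* ... later, when the lock is being saved/released ... */

        if (mdt_is_lock_sync(lock))             /* l_ast_data != NULL */
                mdt_device_commit_async(info->mti_env, mdt);
#endif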
2456 * Blocking AST for mdt locks.
2458 * Starts a transaction commit in case of a COS lock conflict, or
2459 * defers such a commit to mdt_save_lock().
2461 * \param lock the lock which blocks a request or a cancelling lock
2462 * \param desc unused
2463 * \param data unused
2464 * \param flag indicates whether this is a cancelling or blocking callback
2466 * \see ldlm_blocking_ast_nocheck
2468 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2469 void *data, int flag)
2471 struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2472 struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2476 if (flag == LDLM_CB_CANCELING)
2478 lock_res_and_lock(lock);
2479 if (lock->l_blocking_ast != mdt_blocking_ast) {
2480 unlock_res_and_lock(lock);
2483 if (mdt_cos_is_enabled(mdt) &&
2484 lock->l_req_mode & (LCK_PW | LCK_EX) &&
2485 lock->l_blocking_lock != NULL &&
2486 lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2487 mdt_set_lock_sync(lock);
2489 rc = ldlm_blocking_ast_nocheck(lock);
2491 /* There is no lock conflict if l_blocking_lock == NULL; it
2492 * indicates a blocking AST sent from ldlm_lock_decref_internal
2493 * when the last reference to a local lock was released */
2494 if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2497 rc = lu_env_init(&env, LCT_LOCAL);
2498 if (unlikely(rc != 0))
2499 CWARN("lu_env initialization failed with rc = %d, "
2500 "cannot start asynchronous commit\n", rc);
2502 mdt_device_commit_async(&env, mdt);
2508 int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2509 void *data, int flag)
2511 struct lustre_handle lockh;
2515 case LDLM_CB_BLOCKING:
2516 ldlm_lock2handle(lock, &lockh);
2517 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
2519 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2523 case LDLM_CB_CANCELING:
2524 LDLM_DEBUG(lock, "Revoke remote lock\n");
2532 int mdt_remote_object_lock(struct mdt_thread_info *mti,
2533 struct mdt_object *o, struct lustre_handle *lh,
2534 ldlm_mode_t mode, __u64 ibits)
2536 struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
2537 ldlm_policy_data_t *policy = &mti->mti_policy;
2541 LASSERT(mdt_object_remote(o));
2543 LASSERT((ibits & MDS_INODELOCK_UPDATE));
2545 memset(einfo, 0, sizeof(*einfo));
2546 einfo->ei_type = LDLM_IBITS;
2547 einfo->ei_mode = mode;
2548 einfo->ei_cb_bl = mdt_md_blocking_ast;
2549 einfo->ei_cb_cp = ldlm_completion_ast;
2551 memset(policy, 0, sizeof(*policy));
2552 policy->l_inodebits.bits = ibits;
2554 rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo,
2559 static int mdt_object_lock0(struct mdt_thread_info *info, struct mdt_object *o,
2560 struct mdt_lock_handle *lh, __u64 ibits,
2561 bool nonblock, int locality)
2563 struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2564 ldlm_policy_data_t *policy = &info->mti_policy;
2565 struct ldlm_res_id *res_id = &info->mti_res_id;
2570 LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2571 LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2572 LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2573 LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2575 if (mdt_object_remote(o)) {
2576 if (locality == MDT_CROSS_LOCK) {
2577 ibits &= ~(MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM);
2578 ibits |= MDS_INODELOCK_LOOKUP;
2581 (MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM)),
2582 "%s: wrong bit "LPX64" for remote obj "DFID"\n",
2583 mdt_obd_name(info->mti_mdt), ibits,
2584 PFID(mdt_object_fid(o)));
2585 LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2587 /* No PDO lock on remote object */
2588 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2591 if (lh->mlh_type == MDT_PDO_LOCK) {
2592 /* check for existence after the object is locked */
2593 if (mdt_object_exists(o) == 0) {
2594 /* Non-existent object shouldn't have PDO lock */
2597 /* Non-dir object shouldn't have PDO lock */
2598 if (!S_ISDIR(lu_object_attr(&o->mot_obj)))
2603 memset(policy, 0, sizeof(*policy));
2604 fid_build_reg_res_name(mdt_object_fid(o), res_id);
2606 dlmflags = LDLM_FL_ATOMIC_CB;
2608 dlmflags |= LDLM_FL_BLOCK_NOWAIT;
2611 * Take a PDO lock on the whole directory and build the correct @res_id
2612 * for a lock on part of the directory.
2614 if (lh->mlh_pdo_hash != 0) {
2615 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2616 mdt_lock_pdo_mode(info, o, lh);
2617 if (lh->mlh_pdo_mode != LCK_NL) {
2619 * Do not use LDLM_FL_LOCAL_ONLY for the parallel lock; it
2620 * is never going to be sent to the client and we do not
2621 * want it slowed down due to possible cancels.
2623 policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2624 rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2625 policy, res_id, dlmflags,
2626 &info->mti_exp->exp_handle.h_cookie);
2632 * Finish res_id initialization with the name hash, marking the part
2633 * of the directory which is being modified.
2635 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2638 policy->l_inodebits.bits = ibits;
2641 * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2642 * going to be sent to the client. If it is, the mdt_intent_policy() path
2643 * will fix it up and turn the FL_LOCAL flag off.
2645 rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2646 res_id, LDLM_FL_LOCAL_ONLY | dlmflags,
2647 &info->mti_exp->exp_handle.h_cookie);
2649 mdt_object_unlock(info, o, lh, 1);
2650 else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2651 lh->mlh_pdo_hash != 0 &&
2652 (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2653 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2659 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2660 struct mdt_lock_handle *lh, __u64 ibits, int locality)
2662 return mdt_object_lock0(info, o, lh, ibits, false, locality);
2665 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o,
2666 struct mdt_lock_handle *lh, __u64 ibits, int locality)
2668 struct mdt_lock_handle tmp = *lh;
2671 rc = mdt_object_lock0(info, o, &tmp, ibits, true, locality);
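/*
 * Usage sketch (editor's addition): a caller can try a non-blocking
 * enqueue first (LDLM_FL_BLOCK_NOWAIT) and fall back to a blocking one.
 * This assumes mdt_object_lock_try() returns nonzero on success; the
 * ibits/locality values are for illustration only.
 */
#if 0
        if (!mdt_object_lock_try(info, o, lh, MDS_INODELOCK_UPDATE,
                                 MDT_LOCAL_LOCK)) {
                rc = mdt_object_lock(info, o, lh, MDS_INODELOCK_UPDATE,
                                     MDT_LOCAL_LOCK);
                if (rc != 0)
                        RETURN(rc);
        }
#endif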
2679 * Save a lock within the request object.
2681 * Keep the lock referenced until either client ACK or transaction
2682 * commit happens, or release the lock immediately, depending on input
2683 * parameters. If COS is ON, a write lock is converted to a COS lock
2686 * \param info thread info object
2687 * \param h lock handle
2688 * \param mode lock mode
2689 * \param decref force immediate lock releasing
2692 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2693 ldlm_mode_t mode, int decref)
2697 if (lustre_handle_is_used(h)) {
2698 if (decref || !info->mti_has_trans ||
2699 !(mode & (LCK_PW | LCK_EX))){
2700 mdt_fid_unlock(h, mode);
2702 struct mdt_device *mdt = info->mti_mdt;
2703 struct ldlm_lock *lock = ldlm_handle2lock(h);
2704 struct ptlrpc_request *req = mdt_info_req(info);
2707 LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2709 CDEBUG(D_HA, "request = %p reply state = %p"
2710 " transno = "LPD64"\n",
2711 req, req->rq_reply_state, req->rq_transno);
2712 if (mdt_cos_is_enabled(mdt)) {
2714 ldlm_lock_downgrade(lock, LCK_COS);
2717 ptlrpc_save_lock(req, h, mode, no_ack);
2718 if (mdt_is_lock_sync(lock)) {
2719 CDEBUG(D_HA, "found sync-lock,"
2720 " async commit started\n");
2721 mdt_device_commit_async(info->mti_env,
2724 LDLM_LOCK_PUT(lock);
2733 * Unlock mdt object.
2735 * Immediately release the regular lock and the PDO lock, or save the
2736 * locks in the request and keep them referenced until client ACK or
2737 * transaction commit.
2739 * \param info thread info object
2740 * \param o mdt object
2741 * \param lh mdt lock handle referencing regular and PDO locks
2742 * \param decref force immediate lock releasing
2744 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2745 struct mdt_lock_handle *lh, int decref)
2749 mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2750 mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2752 if (lustre_handle_is_used(&lh->mlh_rreg_lh))
2753 ldlm_lock_decref(&lh->mlh_rreg_lh, lh->mlh_rreg_mode);
2758 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2759 const struct lu_fid *f,
2760 struct mdt_lock_handle *lh,
2763 struct mdt_object *o;
2765 o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2769 rc = mdt_object_lock(info, o, lh, ibits,
2772 mdt_object_put(info->mti_env, o);
2779 void mdt_object_unlock_put(struct mdt_thread_info * info,
2780 struct mdt_object * o,
2781 struct mdt_lock_handle *lh,
2784 mdt_object_unlock(info, o, lh, decref);
2785 mdt_object_put(info->mti_env, o);
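/*
 * Usage sketch (editor's addition): the two helpers above are natural
 * companions -- find+lock on entry, unlock+put on exit. The lock slot,
 * mode and ibits below are assumptions for illustration.
 */
#if 0
        struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
        struct mdt_object *o;

        mdt_lock_reg_init(lh, LCK_PW);
        o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
        if (IS_ERR(o))
                RETURN(PTR_ERR(o));

        /* ... modify the object under the lock ... */

        mdt_object_unlock_put(info, o, lh, rc);
#endif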
2788 struct mdt_handler *mdt_handler_find(__u32 opc, struct mdt_opc_slice *supported)
2790 struct mdt_opc_slice *s;
2791 struct mdt_handler *h;
2794 for (s = supported; s->mos_hs != NULL; s++) {
2795 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2796 h = s->mos_hs + (opc - s->mos_opc_start);
2797 if (likely(h->mh_opc != 0))
2798 LASSERTF(h->mh_opc == opc,
2799 "opcode mismatch %d != %d\n",
2802 h = NULL; /* unsupported opc */
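/*
 * Sketch (editor's addition): mdt_handler_find() expects "supported" to
 * be an array of opcode slices terminated by a NULL ->mos_hs entry,
 * conceptually like the following (names and ranges are assumed for
 * illustration).
 */
#if 0
static struct mdt_opc_slice example_handlers[] = {
        {
                .mos_opc_start = MDS_GETATTR,
                .mos_opc_end   = MDS_LAST_OPC,
                .mos_hs        = example_mds_ops /* per-opcode handler array */
        },
        {
                .mos_hs        = NULL            /* terminator */
        }
};
#endif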
2809 static int mdt_lock_resname_compat(struct mdt_device *m,
2810 struct ldlm_request *req)
2812 /* XXX something... later. */
2816 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2818 /* XXX something... later. */
2823 * Generic code handling requests that have struct mdt_body passed in:
2825 * - extract mdt_body from request and save it in @info, if present;
2827 * - create lu_object, corresponding to the fid in mdt_body, and save it in
2830 * - if the HABEO_CORPUS flag is set for this request type, check whether
2831 * the object actually exists on storage (lu_object_exists()).
2834 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2836 const struct mdt_body *body;
2837 struct mdt_object *obj;
2838 const struct lu_env *env;
2839 struct req_capsule *pill;
2843 env = info->mti_env;
2844 pill = info->mti_pill;
2846 body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2850 if (!(body->valid & OBD_MD_FLID))
2853 if (!fid_is_sane(&body->fid1)) {
2854 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2859 * Do not get the size or any capa fields before we check that the
2860 * request actually contains a capa. There are some requests which do
2861 * not, for instance MDS_IS_SUBDIR.
2863 if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2864 req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2865 mdt_set_capainfo(info, 0, &body->fid1,
2866 req_capsule_client_get(pill, &RMF_CAPA1));
2868 obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2870 if ((flags & HABEO_CORPUS) &&
2871 !mdt_object_exists(obj)) {
2872 mdt_object_put(env, obj);
2873 /* for capability renew ENOENT will be handled in
2875 if (body->valid & OBD_MD_FLOSSCAPA)
2880 info->mti_object = obj;
2889 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2891 struct req_capsule *pill = info->mti_pill;
2895 if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2896 rc = mdt_body_unpack(info, flags);
2900 if (rc == 0 && (flags & HABEO_REFERO)) {
2902 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2903 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2904 info->mti_body->eadatasize);
2905 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2906 req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2909 rc = req_capsule_server_pack(pill);
2914 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2916 struct md_device *next = m->mdt_child;
2918 return next->md_ops->mdo_init_capa_ctxt(env, next,
2919 m->mdt_opts.mo_mds_capa,
2920 m->mdt_capa_timeout,
2926 * Invoke handler for this request opc. Also do necessary preprocessing
2927 * (according to handler ->mh_flags), and post-processing (setting of
2928 * ->last_{xid,committed}).
2930 static int mdt_req_handle(struct mdt_thread_info *info,
2931 struct mdt_handler *h, struct ptlrpc_request *req)
2933 int rc, serious = 0;
2938 LASSERT(h->mh_act != NULL);
2939 LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2940 LASSERT(current->journal_info == NULL);
2943 * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2944 * to put the same checks into handlers like mdt_close(), mdt_reint(),
2945 * etc., without talking to the mdt authors first. Checking the same
2946 * thing there again is useless, and returning a 0 error without packing
2947 * a reply is buggy! Handlers either pack a reply or return an error.
2949 * We return 0 here and do not send any reply in order to emulate a
2950 * network failure. Do not send any reply in case any of the NET-related
2951 * fail_ids has occurred.
2953 if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2957 flags = h->mh_flags;
2958 LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2960 if (h->mh_fmt != NULL) {
2961 req_capsule_set(info->mti_pill, h->mh_fmt);
2962 rc = mdt_unpack_req_pack_rep(info, flags);
2965 if (rc == 0 && flags & MUTABOR &&
2966 exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY)
2967 /* should it be rq_status? */
2970 if (rc == 0 && flags & HABEO_CLAVIS) {
2971 struct ldlm_request *dlm_req;
2973 LASSERT(h->mh_fmt != NULL);
2975 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2976 if (dlm_req != NULL) {
2977 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2979 dlm_req->lock_desc.l_policy_data.\
2980 l_inodebits.bits == 0)) {
2982 * A lock without inodebits makes no sense and
2983 * will oops later in ldlm. If the client fails
2984 * to set such bits, do not trigger an assertion.
2986 * For the liblustre flock case, it may be zero.
2990 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2991 rc = mdt_lock_resname_compat(
2994 info->mti_dlm_req = dlm_req;
3001 /* capability setting changed via /proc, needs reinitialize ctxt */
3002 if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
3003 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
3004 info->mti_mdt->mdt_capa_conf = 0;
3007 if (likely(rc == 0)) {
3009 * Process the request; there can be two types of rc:
3010 * 1) errors with msg unpack/pack, or other failures outside the
3011 * operation itself. These are counted as serious errors;
3012 * 2) errors during the fs operation, which should be placed in rq_status
3015 rc = h->mh_act(info);
3017 !req->rq_no_reply && req->rq_reply_state == NULL) {
3018 DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
3019 "pack reply and returned 0 error\n",
3023 serious = is_serious(rc);
3024 rc = clear_serious(rc);
3028 req->rq_status = rc;
3031 * ELDLM_* codes which are > 0 should go into rq_status only, as
3032 * should all non-serious errors.
3034 if (rc > 0 || !serious)
3037 LASSERT(current->journal_info == NULL);
3039 if (rc == 0 && (flags & HABEO_CLAVIS) &&
3040 info->mti_mdt->mdt_opts.mo_compat_resname) {
3041 struct ldlm_reply *dlmrep;
3043 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3045 rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3048 /* If we're DISCONNECTing, the mdt_export_data is already freed */
3049 if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3050 target_committed_to_req(req);
3052 if (unlikely(req_is_replay(req) &&
3053 lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3054 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3058 target_send_reply(req, rc, info->mti_fail_id);
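/*
 * Sketch (editor's addition): the ->mh_flags consulted above are declared
 * per opcode in the handler tables, conceptually like the entry below.
 * The exact field set and initializer style are assumptions for
 * illustration; the real tables are built elsewhere in the mdt.
 */
#if 0
        {
                .mh_name    = "mdt_getattr",
                .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
                .mh_opc     = MDS_GETATTR,
                .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
                .mh_act     = mdt_getattr,
                .mh_fmt     = &RQF_MDS_GETATTR
        },
#endif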
3062 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3064 lh->mlh_type = MDT_NUL_LOCK;
3065 lh->mlh_reg_lh.cookie = 0ull;
3066 lh->mlh_reg_mode = LCK_MINMODE;
3067 lh->mlh_pdo_lh.cookie = 0ull;
3068 lh->mlh_pdo_mode = LCK_MINMODE;
3069 lh->mlh_rreg_lh.cookie = 0ull;
3070 lh->mlh_rreg_mode = LCK_MINMODE;
3073 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3075 LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3076 LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3080 * Initialize fields of struct mdt_thread_info. Other fields are left in
3081 * an uninitialized state, because it's too expensive to zero out the whole
3082 * mdt_thread_info (> 1K) on each request arrival.
3084 static void mdt_thread_info_init(struct ptlrpc_request *req,
3085 struct mdt_thread_info *info)
3089 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3090 info->mti_pill = &req->rq_pill;
3093 for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3094 mdt_lock_handle_init(&info->mti_lh[i]);
3096 /* mdt device: it can be NULL during CONNECT */
3097 if (req->rq_export) {
3098 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3099 info->mti_exp = req->rq_export;
3101 info->mti_mdt = NULL;
3102 info->mti_env = req->rq_svc_thread->t_env;
3103 info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3104 info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3105 info->mti_mos = NULL;
3107 memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3108 info->mti_big_buf = LU_BUF_NULL;
3109 info->mti_body = NULL;
3110 info->mti_object = NULL;
3111 info->mti_dlm_req = NULL;
3112 info->mti_has_trans = 0;
3113 info->mti_cross_ref = 0;
3114 info->mti_opdata = 0;
3115 info->mti_big_lmm_used = 0;
3117 /* Do not check for split by default. */
3118 info->mti_spec.no_create = 0;
3119 info->mti_spec.sp_rm_entry = 0;
3122 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3126 req_capsule_fini(info->mti_pill);
3127 if (info->mti_object != NULL) {
3128 mdt_object_put(info->mti_env, info->mti_object);
3129 info->mti_object = NULL;
3132 for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3133 mdt_lock_handle_fini(&info->mti_lh[i]);
3134 info->mti_env = NULL;
3136 if (unlikely(info->mti_big_buf.lb_buf != NULL))
3137 lu_buf_free(&info->mti_big_buf);
3140 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3141 struct obd_device *obd, int *process)
3143 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3144 case MDS_CONNECT: /* This will never get here, but for completeness. */
3145 case OST_CONNECT: /* This will never get here, but for completeness. */
3146 case MDS_DISCONNECT:
3147 case OST_DISCONNECT:
3153 case MDS_DONE_WRITING:
3154 case MDS_SYNC: /* used in unmounting */
3161 *process = target_queue_recovery_request(req, obd);
3165 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3172 * Handle recovery. Return:
3173 * +1: continue request processing;
3174 * -ve: abort immediately with the given error code;
3175 * 0: send reply with error code in req->rq_status;
3177 static int mdt_recovery(struct mdt_thread_info *info)
3179 struct ptlrpc_request *req = mdt_info_req(info);
3180 struct obd_device *obd;
3184 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3187 case SEC_CTX_INIT_CONT:
3193 rc = mdt_handle_idmap(info);
3202 if (unlikely(!class_connected_export(req->rq_export))) {
3203 CDEBUG(D_HA, "operation %d on unconnected MDS from %s\n",
3204 lustre_msg_get_opc(req->rq_reqmsg),
3205 libcfs_id2str(req->rq_peer));
3206 /* FIXME: For CMD cleanup, when mds_B stops, the req from
3207 * mds_A will get -ENOTCONN (especially for ping reqs),
3208 * which will cause mds_A's deactivate timeout; then when
3209 * mds_A cleans up, the cleanup process will be suspended since
3210 * the deactivate timeout is not zero.
3212 req->rq_status = -ENOTCONN;
3213 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3217 /* sanity check: if the xid matches, the request must be marked as
3218 * resent or replayed */
3219 if (req_xid_is_last(req)) {
3220 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3221 (MSG_RESENT | MSG_REPLAY))) {
3222 DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3223 "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3224 lustre_msg_get_flags(req->rq_reqmsg));
3226 req->rq_status = -ENOTCONN;
3231 /* else: note the opposite is not always true; a RESENT req after a
3232 * failover will usually not match the last_xid, since it was likely
3233 * never committed. A REPLAYed request will almost never match the
3234 * last xid; however, it could for a committed, but still retained,
3237 obd = req->rq_export->exp_obd;
3239 /* Check for aborted recovery... */
3240 if (unlikely(obd->obd_recovering)) {
3243 DEBUG_REQ(D_INFO, req, "Got new replay");
3244 rc = mdt_filter_recovery_request(req, obd, &should_process);
3245 if (rc != 0 || !should_process)
3247 else if (should_process < 0) {
3248 req->rq_status = should_process;
3249 rc = ptlrpc_error(req);
3256 static int mdt_msg_check_version(struct lustre_msg *msg)
3260 switch (lustre_msg_get_opc(msg)) {
3262 case MDS_DISCONNECT:
3265 case SEC_CTX_INIT_CONT:
3268 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
3270 CERROR("bad opc %u version %08x, expecting %08x\n",
3271 lustre_msg_get_opc(msg),
3272 lustre_msg_get_version(msg),
3273 LUSTRE_OBD_VERSION);
3277 case MDS_GETATTR_NAME:
3284 case MDS_DONE_WRITING:
3291 case MDS_HSM_PROGRESS:
3292 case MDS_HSM_REQUEST:
3293 case MDS_HSM_CT_REGISTER:
3294 case MDS_HSM_CT_UNREGISTER:
3295 case MDS_HSM_STATE_GET:
3296 case MDS_HSM_STATE_SET:
3297 case MDS_HSM_ACTION:
3298 case MDS_QUOTACHECK:
3301 case MDS_SWAP_LAYOUTS:
3306 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
3308 CERROR("bad opc %u version %08x, expecting %08x\n",
3309 lustre_msg_get_opc(msg),
3310 lustre_msg_get_version(msg),
3311 LUSTRE_MDS_VERSION);
3315 case LDLM_BL_CALLBACK:
3316 case LDLM_CP_CALLBACK:
3317 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
3319 CERROR("bad opc %u version %08x, expecting %08x\n",
3320 lustre_msg_get_opc(msg),
3321 lustre_msg_get_version(msg),
3322 LUSTRE_DLM_VERSION);
3324 case OBD_LOG_CANCEL:
3325 case LLOG_ORIGIN_HANDLE_CREATE:
3326 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
3327 case LLOG_ORIGIN_HANDLE_READ_HEADER:
3328 case LLOG_ORIGIN_HANDLE_CLOSE:
3329 case LLOG_ORIGIN_HANDLE_DESTROY:
3330 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
3332 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
3334 CERROR("bad opc %u version %08x, expecting %08x\n",
3335 lustre_msg_get_opc(msg),
3336 lustre_msg_get_version(msg),
3337 LUSTRE_LOG_VERSION);
3340 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
3346 static int mdt_handle0(struct ptlrpc_request *req,
3347 struct mdt_thread_info *info,
3348 struct mdt_opc_slice *supported)
3350 struct mdt_handler *h;
3351 struct lustre_msg *msg;
3356 if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
3359 LASSERT(current->journal_info == NULL);
3361 msg = req->rq_reqmsg;
3362 rc = mdt_msg_check_version(msg);
3363 if (likely(rc == 0)) {
3364 rc = mdt_recovery(info);
3365 if (likely(rc == +1)) {
3366 h = mdt_handler_find(lustre_msg_get_opc(msg),
3368 if (likely(h != NULL)) {
3369 rc = mdt_req_handle(info, h, req);
3371 CERROR("%s: opc unsupported: 0x%x\n",
3372 mdt_obd_name(info->mti_mdt),
3373 lustre_msg_get_opc(msg));
3374 req->rq_status = -ENOTSUPP;
3375 rc = ptlrpc_error(req);
3380 CDEBUG(D_INFO, "%s: drops malformed request: rc = %d\n",
3381 mdt_obd_name(info->mti_mdt), rc);
3382 req->rq_status = rc;
3383 rc = ptlrpc_error(req);
3389 * MDT handler function called by ptlrpc service thread when request comes.
3391 * XXX common "target" functionality should be factored into a separate
3392 * module shared by mdt, ost and stand-alone services like fld.
3394 int mdt_handle_common(struct ptlrpc_request *req,
3395 struct mdt_opc_slice *supported)
3398 struct mdt_thread_info *info;
3402 env = req->rq_svc_thread->t_env;
3403 /* Refill (initialize) the context (mdt_thread_info), in case it is
3404 * not initialized yet. This usually happens during startup: after
3405 * the MDS (ptlrpc threads) starts up, it gets the first CONNECT
3406 * request before mdt_thread_info is initialized */
3408 LASSERT(env != NULL);
3409 LASSERT(env->le_ses != NULL);
3410 LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
3411 info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3412 LASSERT(info != NULL);
3414 mdt_thread_info_init(req, info);
3416 rc = mdt_handle0(req, info, supported);
3418 mdt_thread_info_fini(info);
3423 * This is called from recovery code as handler of _all_ RPC types, FLD and SEQ
3426 int mdt_recovery_handle(struct ptlrpc_request *req)
3431 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3433 rc = mdt_handle_common(req, mdt_fld_handlers);
3436 rc = mdt_handle_common(req, mdt_seq_handlers);
3439 rc = mdt_handle_common(req, mdt_regular_handlers);
3461 static int mdt_intent_getattr(enum mdt_it_code opcode,
3462 struct mdt_thread_info *info,
3463 struct ldlm_lock **,
3465 static int mdt_intent_layout(enum mdt_it_code opcode,
3466 struct mdt_thread_info *info,
3467 struct ldlm_lock **,
3469 static int mdt_intent_reint(enum mdt_it_code opcode,
3470 struct mdt_thread_info *info,
3471 struct ldlm_lock **,
3474 static struct mdt_it_flavor {
3475 const struct req_format *it_fmt;
3477 int (*it_act)(enum mdt_it_code ,
3478 struct mdt_thread_info *,
3479 struct ldlm_lock **,
3482 } mdt_it_flavor[] = {
3484 .it_fmt = &RQF_LDLM_INTENT,
3485 /*.it_flags = HABEO_REFERO,*/
3487 .it_act = mdt_intent_reint,
3488 .it_reint = REINT_OPEN
3491 .it_fmt = &RQF_LDLM_INTENT,
3492 .it_flags = MUTABOR,
3493 .it_act = mdt_intent_reint,
3494 .it_reint = REINT_OPEN
3497 .it_fmt = &RQF_LDLM_INTENT,
3498 .it_flags = MUTABOR,
3499 .it_act = mdt_intent_reint,
3500 .it_reint = REINT_CREATE
3502 [MDT_IT_GETATTR] = {
3503 .it_fmt = &RQF_LDLM_INTENT_GETATTR,
3504 .it_flags = HABEO_REFERO,
3505 .it_act = mdt_intent_getattr
3507 [MDT_IT_READDIR] = {
3513 .it_fmt = &RQF_LDLM_INTENT_GETATTR,
3514 .it_flags = HABEO_REFERO,
3515 .it_act = mdt_intent_getattr
3518 .it_fmt = &RQF_LDLM_INTENT_UNLINK,
3519 .it_flags = MUTABOR,
3521 .it_reint = REINT_UNLINK
3525 .it_flags = MUTABOR,
3528 [MDT_IT_GETXATTR] = {
3534 .it_fmt = &RQF_LDLM_INTENT_LAYOUT,
3536 .it_act = mdt_intent_layout
3540 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3541 struct ldlm_lock **lockp,
3542 struct ldlm_lock *new_lock,
3543 struct mdt_lock_handle *lh,
3546 struct ptlrpc_request *req = mdt_info_req(info);
3547 struct ldlm_lock *lock = *lockp;
3550 * Get a new lock only in cases when a possible resent request did not find any
3553 if (new_lock == NULL)
3554 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3556 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3557 lh->mlh_reg_lh.cookie = 0;
3561 LASSERTF(new_lock != NULL,
3562 "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3565 * If we've already given this lock to a client once, then we should
3566 * have no readers or writers. Otherwise, we should have one reader
3567 * _or_ writer ref (which will be zeroed below) before returning the
3570 if (new_lock->l_export == req->rq_export) {
3571 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3573 LASSERT(new_lock->l_export == NULL);
3574 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3579 if (new_lock->l_export == req->rq_export) {
3581 * Already gave this to the client, which means that we
3582 * reconstructed a reply.
3584 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3586 lh->mlh_reg_lh.cookie = 0;
3587 RETURN(ELDLM_LOCK_REPLACED);
3591 * Fixup the lock to be given to the client.
3593 lock_res_and_lock(new_lock);
3594 /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3595 * a possible blocking AST. */
3596 while (new_lock->l_readers > 0) {
3597 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3598 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3599 new_lock->l_readers--;
3601 while (new_lock->l_writers > 0) {
3602 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3603 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3604 new_lock->l_writers--;
3607 new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3608 new_lock->l_blocking_ast = lock->l_blocking_ast;
3609 new_lock->l_completion_ast = lock->l_completion_ast;
3610 new_lock->l_remote_handle = lock->l_remote_handle;
3611 new_lock->l_flags &= ~LDLM_FL_LOCAL;
3613 unlock_res_and_lock(new_lock);
3615 cfs_hash_add(new_lock->l_export->exp_lock_hash,
3616 &new_lock->l_remote_handle,
3617 &new_lock->l_exp_hash);
3619 LDLM_LOCK_RELEASE(new_lock);
3620 lh->mlh_reg_lh.cookie = 0;
3622 RETURN(ELDLM_LOCK_REPLACED);
3625 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3626 struct ldlm_lock *new_lock,
3627 struct ldlm_lock **old_lock,
3628 struct mdt_lock_handle *lh)
3630 struct ptlrpc_request *req = mdt_info_req(info);
3631 struct obd_export *exp = req->rq_export;
3632 struct lustre_handle remote_hdl;
3633 struct ldlm_request *dlmreq;
3634 struct ldlm_lock *lock;
3636 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3639 dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3640 remote_hdl = dlmreq->lock_handle[0];
3642 /* In the function below, .hs_keycmp resolves to
3643 * ldlm_export_lock_keycmp() */
3644 /* coverity[overrun-buffer-val] */
3645 lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3647 if (lock != new_lock) {
3648 lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3649 lh->mlh_reg_mode = lock->l_granted_mode;
3651 LDLM_DEBUG(lock, "Restoring lock cookie");
3652 DEBUG_REQ(D_DLMTRACE, req,
3653 "restoring lock cookie "LPX64,
3654 lh->mlh_reg_lh.cookie);
3656 *old_lock = LDLM_LOCK_GET(lock);
3657 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3661 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3665 * If the xid matches, then we know this is a resent request, and allow
3666 * it. (It's probably an OPEN, for which we don't send a lock.)
3668 if (req_xid_is_last(req))
3672 * This remote handle isn't enqueued, so we never received or processed
3673 * this request. Clear MSG_RESENT, because it can be handled like any
3674 * normal request now.
3676 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3678 DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3682 static int mdt_intent_getattr(enum mdt_it_code opcode,
3683 struct mdt_thread_info *info,
3684 struct ldlm_lock **lockp,
3687 struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3688 struct ldlm_lock *new_lock = NULL;
3690 struct ldlm_reply *ldlm_rep;
3691 struct ptlrpc_request *req;
3692 struct mdt_body *reqbody;
3693 struct mdt_body *repbody;
3697 reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
3700 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
3703 info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
3704 repbody->eadatasize = 0;
3705 repbody->aclsize = 0;
3709 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
3711 case MDT_IT_GETATTR:
3712 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
3716 CERROR("Unsupported intent (%d)\n", opcode);
3717 GOTO(out_shrink, rc = -EINVAL);
3720 rc = mdt_init_ucred(info, reqbody);
3722 GOTO(out_shrink, rc);
3724 req = info->mti_pill->rc_req;