/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/mdt/mdt_handler.c
 *
 * Lustre Metadata Target (mdt) request handler
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Mike Shaver <shaver@clusterfs.com>
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Huang Hua <huanghua@clusterfs.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_MDS

#include <linux/module.h>
/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>
/* struct ptlrpc_request */
#include <lustre_net.h>
/* struct obd_export */
#include <lustre_export.h>
/* struct obd_device */
#include <obd.h>
#include <dt_object.h>
#include <lustre_mds.h>
#include <lustre_mdt.h>
#include <lustre_log.h>
#include "mdt_internal.h"
#include <lustre_acl.h>
#include <lustre_param.h>
#include <lustre_quota.h>
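
/*
 * Translation tables between the LDLM lock modes used on the wire and the
 * MDL lock modes used by the metadata layers; the two arrays below are
 * mirror images of each other and must be kept in sync.
 */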
mdl_mode_t mdt_mdl_lock_modes[] = {
        [LCK_MINMODE] = MDL_MINMODE,
        [LCK_EX]      = MDL_EX,
        [LCK_PW]      = MDL_PW,
        [LCK_PR]      = MDL_PR,
        [LCK_CW]      = MDL_CW,
        [LCK_CR]      = MDL_CR,
        [LCK_NL]      = MDL_NL,
        [LCK_GROUP]   = MDL_GROUP
};

ldlm_mode_t mdt_dlm_lock_modes[] = {
        [MDL_MINMODE] = LCK_MINMODE,
        [MDL_EX]      = LCK_EX,
        [MDL_PW]      = LCK_PW,
        [MDL_PR]      = LCK_PR,
        [MDL_CW]      = LCK_CW,
        [MDL_CR]      = LCK_CR,
        [MDL_NL]      = LCK_NL,
        [MDL_GROUP]   = LCK_GROUP
};

/*
 * Initialized in mdt_mod_init().
 */
static unsigned long mdt_num_threads;
CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
                "number of MDS service threads to start "
                "(deprecated in favor of mds_num_threads)");

static unsigned long mds_num_threads;
CFS_MODULE_PARM(mds_num_threads, "ul", ulong, 0444,
                "number of MDS service threads to start");

static char *mds_num_cpts;
CFS_MODULE_PARM(mds_num_cpts, "c", charp, 0444,
                "CPU partitions MDS threads should run on");

static unsigned long mds_rdpg_num_threads;
CFS_MODULE_PARM(mds_rdpg_num_threads, "ul", ulong, 0444,
                "number of MDS readpage service threads to start");

static char *mds_rdpg_num_cpts;
CFS_MODULE_PARM(mds_rdpg_num_cpts, "c", charp, 0444,
                "CPU partitions MDS readpage threads should run on");

/* NB: these two should be removed along with setattr service in the future */
static unsigned long mds_attr_num_threads;
CFS_MODULE_PARM(mds_attr_num_threads, "ul", ulong, 0444,
                "number of MDS setattr service threads to start");

static char *mds_attr_num_cpts;
CFS_MODULE_PARM(mds_attr_num_cpts, "c", charp, 0444,
                "CPU partitions MDS setattr threads should run on");

/*
 * ptlrpc request handler for MDT. All handlers are
 * grouped into several slices - struct mdt_opc_slice,
 * and stored in an array - mdt_handlers[].
 */
struct mdt_handler {
        /* The name of this handler. */
        const char              *mh_name;
        /* Fail id for this handler, checked at the beginning of this handler. */
        __u32                    mh_fail_id;
        /* Operation code for this handler. */
        __u32                    mh_opc;
        /* Flags are listed in enum mdt_handler_flags below. */
        __u32                    mh_flags;
        /* The actual handler function to execute. */
        int (*mh_act)(struct mdt_thread_info *info);
        /* Request format for this request. */
        const struct req_format *mh_fmt;
};

enum mdt_handler_flags {
        /*
         * struct mdt_body is passed in the incoming message, and the object
         * identified by this fid exists on disk.
         *
         * "habeo corpus" == "I have a body"
         */
        HABEO_CORPUS = (1 << 0),
        /*
         * struct ldlm_request is passed in the incoming message.
         *
         * "habeo clavis" == "I have a key"
         */
        HABEO_CLAVIS = (1 << 1),
        /*
         * this request has a fixed reply format, so that the reply message
         * can be packed by generic code.
         *
         * "habeo refero" == "I have a reply"
         */
        HABEO_REFERO = (1 << 2),
        /*
         * this request will modify something, so check whether the filesystem
         * is readonly or not, then return -EROFS to the client asap if
         * necessary.
         *
         * "mutabor" == "I shall modify"
         */
        MUTABOR      = (1 << 3)
};

struct mdt_opc_slice {
        __u32               mos_opc_start;
        int                 mos_opc_end;
        struct mdt_handler *mos_hs;
};

static struct mdt_opc_slice mdt_regular_handlers[];
static struct mdt_opc_slice mdt_readpage_handlers[];
static struct mdt_opc_slice mdt_xmds_handlers[];
static struct mdt_opc_slice mdt_seq_handlers[];
static struct mdt_opc_slice mdt_fld_handlers[];

static struct mdt_device *mdt_dev(struct lu_device *d);
static int mdt_regular_handle(struct ptlrpc_request *req);
static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
                        struct getinfo_fid2path *fp);

static const struct lu_object_operations mdt_obj_ops;

/* Slab for MDT object allocation */
static cfs_mem_cache_t *mdt_object_kmem;

static struct lu_kmem_descr mdt_caches[] = {
        {
                .ckd_cache = &mdt_object_kmem,
                .ckd_name  = "mdt_obj",
                .ckd_size  = sizeof(struct mdt_object)
        },
        {
                .ckd_cache = NULL
        }
};

int mdt_get_disposition(struct ldlm_reply *rep, int flag)
{
        if (rep == NULL)
                return 0;
        return (rep->lock_policy_res1 & flag);
}

void mdt_clear_disposition(struct mdt_thread_info *info,
                           struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata &= ~flag;
        if (rep)
                rep->lock_policy_res1 &= ~flag;
}

void mdt_set_disposition(struct mdt_thread_info *info,
                         struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata |= flag;
        if (rep)
                rep->lock_policy_res1 |= flag;
}
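
/*
 * The disposition bits (DISP_*) handled above are accumulated both in the
 * per-request mti_opdata and in lock_policy_res1 of the intent reply, so
 * the client can tell which phases of an intent operation were actually
 * executed and with what outcome.
 */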

void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
{
        lh->mlh_pdo_hash = 0;
        lh->mlh_reg_mode = lm;
        lh->mlh_type = MDT_REG_LOCK;
}

void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
                       const char *name, int namelen)
{
        lh->mlh_reg_mode = lm;
        lh->mlh_type = MDT_PDO_LOCK;

        if (name != NULL && (name[0] != '\0')) {
                LASSERT(namelen > 0);
                lh->mlh_pdo_hash = full_name_hash(name, namelen);
        } else {
                LASSERT(namelen == 0);
                lh->mlh_pdo_hash = 0ull;
        }
}
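
/*
 * For PDO ("parallel directory operations") locks the hash of the name
 * being operated on selects a sub-lock of the directory, so operations on
 * different names within one directory can proceed in parallel; a hash of
 * 0 denotes a lock on the directory as a whole.
 */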

static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
                              struct mdt_lock_handle *lh)
{
        mdl_mode_t mode;
        ENTRY;

        /*
         * Any dir access needs a couple of locks:
         *
         * 1) on the part of the dir we are going to look up or modify;
         *
         * 2) on the whole dir to protect it from concurrent splitting
         * and/or to flush the client's cache for readdir().
         *
         * So, for a given mode and object, this routine decides what lock
         * mode to use for lock #2:
         *
         * 1) if the caller is going to look up in the dir, then we only need
         * to protect the dir from being split - LCK_CR;
         *
         * 2) if the caller is going to modify the dir, then we need to
         * protect the dir from being split and to flush the cache - LCK_CW;
         *
         * 3) if the caller is going to modify the dir and that dir seems
         * ready for splitting, then we need to protect it from any type of
         * access (lookup/modify/split) - LCK_EX. --bzzz
         */

        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);

        /*
         * Ask the underlying level for its opinion about the preferable PDO
         * lock mode, given the access type passed as a regular lock mode:
         *
         * - MDL_MINMODE means that the lower layer does not want to specify
         * a lock mode;
         *
         * - MDL_NL means that no PDO lock should be taken. This is used in
         * some cases, e.g. for non-splittable directories there is no need
         * to use PDO locks at all.
         */
        mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
                             mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
        if (mode != MDL_MINMODE) {
                lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
        } else {
                /*
                 * The lower layer does not want to specify a locking mode,
                 * so we do it ourselves. No special protection is needed,
                 * just flush the client's cache on modification and allow
                 * concurrent lookups/reads.
                 */
                switch (lh->mlh_reg_mode) {
                case LCK_EX:
                        lh->mlh_pdo_mode = LCK_EX;
                        break;
                case LCK_PR:
                        lh->mlh_pdo_mode = LCK_CR;
                        break;
                case LCK_PW:
                        lh->mlh_pdo_mode = LCK_CW;
                        break;
                default:
                        CERROR("Unexpected lock type (0x%x)\n",
                               (int)lh->mlh_reg_mode);
                        LBUG();
                }
        }

        LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
        EXIT;
}

static int mdt_getstatus(struct mdt_thread_info *info)
{
        struct mdt_device *mdt  = info->mti_mdt;
        struct md_device  *next = mdt->mdt_child;
        struct mdt_body   *repbody;
        int                rc;
        ENTRY;

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
        if (rc != 0)
                RETURN(rc);

        repbody->valid |= OBD_MD_FLID;

        if (mdt->mdt_opts.mo_mds_capa &&
            info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
                struct mdt_object  *root;
                struct lustre_capa *capa;

                root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
                                 0);
                mdt_object_put(info->mti_env, root);
                if (rc == 0)
                        repbody->valid |= OBD_MD_FLMDSCAPA;
        }

        RETURN(rc);
}

static int mdt_statfs(struct mdt_thread_info *info)
{
        struct ptlrpc_request      *req  = mdt_info_req(info);
        struct md_device           *next = info->mti_mdt->mdt_child;
        struct ptlrpc_service_part *svcpt;
        struct obd_statfs          *osfs;
        int                         rc;
        ENTRY;

        svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;

        /* This will trigger a watchdog timeout */
        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
                         (MDT_SERVICE_WATCHDOG_FACTOR *
                          at_get(&svcpt->scp_at_estimate)) + 1);

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
                RETURN(err_serious(-ENOMEM));

        osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
        if (osfs == NULL)
                RETURN(-EPROTO);

        /** statfs information is cached in the mdt_device */
        if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
                               cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
                /** statfs data is too old, get an up-to-date one */
                rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
                if (rc)
                        RETURN(rc);
                cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
                info->mti_mdt->mdt_osfs = *osfs;
                info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
                cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        } else {
                /** use cached statfs data */
                cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
                *osfs = info->mti_mdt->mdt_osfs;
                cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        }

        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_STATFS);

        RETURN(rc);
}

/**
 * Pack SOM attributes into the reply.
 * Call under a DLM UPDATE lock.
 */
static void mdt_pack_size2body(struct mdt_thread_info *info,
                               struct mdt_object *mo)
{
        struct mdt_body *b;
        struct md_attr  *ma = &info->mti_attr;

        LASSERT(ma->ma_attr.la_valid & LA_MODE);
        b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        /* Check if Size-on-MDS is supported, if this is a regular file,
         * if SOM is enabled on the object, and if the SOM cache exists and
         * is valid. Otherwise do not pack Size-on-MDS attributes into the
         * reply. */
        if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
            !S_ISREG(ma->ma_attr.la_mode) ||
            !mdt_object_is_som_enabled(mo) ||
            !(ma->ma_valid & MA_SOM))
                return;

        b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        b->size = ma->ma_som->msd_size;
        b->blocks = ma->ma_som->msd_blocks;
}

void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
                        const struct lu_attr *attr, const struct lu_fid *fid)
{
        struct md_attr *ma = &info->mti_attr;

        LASSERT(ma->ma_valid & MA_INODE);

        b->atime  = attr->la_atime;
        b->mtime  = attr->la_mtime;
        b->ctime  = attr->la_ctime;
        b->mode   = attr->la_mode;
        b->size   = attr->la_size;
        b->blocks = attr->la_blocks;
        b->uid    = attr->la_uid;
        b->gid    = attr->la_gid;
        b->flags  = attr->la_flags;
        b->nlink  = attr->la_nlink;
        b->rdev   = attr->la_rdev;

        /* XXX should pack the reply body according to lu_valid */
        b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID |
                    OBD_MD_FLGID | OBD_MD_FLTYPE |
                    OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
                    OBD_MD_FLATIME | OBD_MD_FLMTIME;

        if (!S_ISREG(attr->la_mode)) {
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
        } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
                /* means no objects are allocated on osts. */
                LASSERT(!(ma->ma_valid & MA_LOV));
                /* just ignore blocks occupied by extended attributes on MDS */
                b->blocks = 0;
                /* if no object is allocated on osts, the size on mds is
                 * valid. b=22272 */
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        }

        if (fid) {
                b->fid1 = *fid;
                b->valid |= OBD_MD_FLID;

                /* FIXME: these should be fixed when the new igif is ready. */
                b->ino = fid_oid(fid);        /* 1.6 compatibility */
                b->generation = fid_ver(fid); /* 1.6 compatibility */
                b->valid |= OBD_MD_FLGENER;   /* 1.6 compatibility */

                CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
                       PFID(fid), b->nlink, b->mode, b->size);
        }

        if (info)
                mdt_body_reverse_idmap(info, b);

        if (b->valid & OBD_MD_FLSIZE)
                CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
                       PFID(fid), (unsigned long long)b->size);
}

static inline int mdt_body_has_lov(const struct lu_attr *la,
                                   const struct mdt_body *body)
{
        return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
                (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA)));
}

void mdt_client_compatibility(struct mdt_thread_info *info)
{
        struct mdt_body       *body;
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_export     *exp = req->rq_export;
        struct md_attr        *ma = &info->mti_attr;
        struct lu_attr        *la = &ma->ma_attr;
        ENTRY;

        if (exp->exp_connect_flags & OBD_CONNECT_LAYOUTLOCK)
                /* the client can deal with 16-bit lmm_stripe_count */
                RETURN_EXIT;

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        if (!mdt_body_has_lov(la, body))
                RETURN_EXIT;

        /* now we have a reply with a lov for a client not compatible with
         * the layout lock, so we have to clear the layout generation
         * number */
        if (S_ISREG(la->la_mode))
                ma->ma_lmm->lmm_layout_gen = 0;
        EXIT;
}

static int mdt_big_lmm_get(const struct lu_env *env, struct mdt_object *o,
                           struct md_attr *ma)
{
        struct mdt_thread_info *info;
        int rc;
        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
        LASSERT(info != NULL);
        LASSERT(ma->ma_lmm_size > 0);
        LASSERT(info->mti_big_lmm_used == 0);
        rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL,
                          XATTR_NAME_LOV);
        if (rc < 0)
                RETURN(rc);

        /* big_lmm may need to be grown */
        if (info->mti_big_lmmsize < rc) {
                int size = size_roundup_power2(rc);

                if (info->mti_big_lmmsize > 0) {
                        /* free old buffer */
                        LASSERT(info->mti_big_lmm);
                        OBD_FREE_LARGE(info->mti_big_lmm,
                                       info->mti_big_lmmsize);
                        info->mti_big_lmm = NULL;
                        info->mti_big_lmmsize = 0;
                }

                OBD_ALLOC_LARGE(info->mti_big_lmm, size);
                if (info->mti_big_lmm == NULL)
                        RETURN(-ENOMEM);
                info->mti_big_lmmsize = size;
        }
        LASSERT(info->mti_big_lmmsize >= rc);

        info->mti_buf.lb_buf = info->mti_big_lmm;
        info->mti_buf.lb_len = info->mti_big_lmmsize;
        rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf,
                          XATTR_NAME_LOV);
        if (rc < 0)
                RETURN(rc);

        info->mti_big_lmm_used = 1;
        ma->ma_valid |= MA_LOV;
        ma->ma_lmm = info->mti_big_lmm;
        ma->ma_lmm_size = rc;

        /* update mdt_max_mdsize so all clients will be aware of it */
        if (info->mti_mdt->mdt_max_mdsize < rc)
                info->mti_mdt->mdt_max_mdsize = rc;

        RETURN(0);
}
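
/*
 * Fetch the LOV EA of an object into the buffer supplied in @ma. If that
 * buffer turns out to be too small (-ERANGE), retry with the resizable
 * per-thread buffer via mdt_big_lmm_get() above.
 */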
int mdt_attr_get_lov(struct mdt_thread_info *info,
                     struct mdt_object *o, struct md_attr *ma)
{
        struct md_object *next = mdt_object_child(o);
        struct lu_buf    *buf = &info->mti_buf;
        int rc;

        buf->lb_buf = ma->ma_lmm;
        buf->lb_len = ma->ma_lmm_size;
        rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
        if (rc > 0) {
                ma->ma_lmm_size = rc;
                ma->ma_valid |= MA_LOV;
                rc = 0;
        } else if (rc == -ENODATA) {
                /* no LOV EA */
                rc = 0;
        } else if (rc == -ERANGE) {
                rc = mdt_big_lmm_get(info->mti_env, o, ma);
        }

        return rc;
}
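
/*
 * Gather everything requested in ma->ma_need for a single object: basic
 * inode attributes, LOV/LMV striping EAs, HSM and SOM state (carried in
 * the LMA EA), and, for directories, the default ACL.
 */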
int mdt_attr_get_complex(struct mdt_thread_info *info,
                         struct mdt_object *o, struct md_attr *ma)
{
        const struct lu_env *env = info->mti_env;
        struct md_object    *next = mdt_object_child(o);
        struct lu_buf       *buf = &info->mti_buf;
        u32                  mode = lu_object_attr(&next->mo_lu);
        int                  need = ma->ma_need;
        int                  rc = 0, rc2;
        ENTRY;

        /* do we really need PFID? */
        LASSERT((ma->ma_need & MA_PFID) == 0);

        ma->ma_valid = 0;

        if (need & MA_INODE) {
                ma->ma_need = MA_INODE;
                rc = mo_attr_get(env, next, ma);
                if (rc)
                        GOTO(out, rc);
                ma->ma_valid |= MA_INODE;
        }

        if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
                rc = mdt_attr_get_lov(info, o, ma);
                if (rc)
                        GOTO(out, rc);
        }

        if (need & MA_LMV && S_ISDIR(mode)) {
                buf->lb_buf = ma->ma_lmv;
                buf->lb_len = ma->ma_lmv_size;
                rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
                if (rc2 > 0) {
                        ma->ma_lmv_size = rc2;
                        ma->ma_valid |= MA_LMV;
                } else if (rc2 == -ENODATA) {
                        /* no LMV EA */
                        ma->ma_lmv_size = 0;
                } else
                        GOTO(out, rc = rc2);
        }

        if (rc == 0 && S_ISREG(mode) && (need & (MA_HSM | MA_SOM))) {
                struct lustre_mdt_attrs *lma;

                lma = (struct lustre_mdt_attrs *)info->mti_xattr_buf;
                CLASSERT(sizeof(*lma) <= sizeof(info->mti_xattr_buf));

                buf->lb_buf = lma;
                buf->lb_len = sizeof(info->mti_xattr_buf);
                rc = mo_xattr_get(env, next, buf, XATTR_NAME_LMA);
                if (rc > 0) {
                        lustre_lma_swab(lma);
                        /* Swab and copy LMA */
                        if (need & MA_HSM) {
                                if (lma->lma_compat & LMAC_HSM)
                                        ma->ma_hsm.mh_flags =
                                                lma->lma_flags & HSM_FLAGS_MASK;
                                else
                                        ma->ma_hsm.mh_flags = 0;
                                ma->ma_valid |= MA_HSM;
                        }

                        /* Copy SOM attributes */
                        if (need & MA_SOM && lma->lma_compat & LMAC_SOM) {
                                LASSERT(ma->ma_som != NULL);
                                ma->ma_som->msd_ioepoch = lma->lma_ioepoch;
                                ma->ma_som->msd_size    = lma->lma_som_size;
                                ma->ma_som->msd_blocks  = lma->lma_som_blocks;
                                ma->ma_som->msd_mountid = lma->lma_som_mountid;
                                ma->ma_valid |= MA_SOM;
                        }
                        rc = 0;
                } else if (rc == -ENODATA) {
                        rc = 0;
                }
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (need & MA_ACL_DEF && S_ISDIR(mode)) {
                buf->lb_buf = ma->ma_acl;
                buf->lb_len = ma->ma_acl_size;
                rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
                if (rc2 > 0) {
                        ma->ma_acl_size = rc2;
                        ma->ma_valid |= MA_ACL_DEF;
                } else if (rc2 == -ENODATA) {
                        /* no default ACL */
                        ma->ma_acl_size = 0;
                } else
                        GOTO(out, rc = rc2);
        }
#endif
out:
        ma->ma_need = need;
        CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
               rc, ma->ma_valid, ma->ma_lmm);
        RETURN(rc);
}
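
/*
 * Fill the getattr reply (mdt_body plus any EA, symlink, ACL or capability
 * payload) for object @o according to the flags the client set in the
 * request body.
 */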
static int mdt_getattr_internal(struct mdt_thread_info *info,
                                struct mdt_object *o, int ma_need)
{
        struct md_object      *next = mdt_object_child(o);
        const struct mdt_body *reqbody = info->mti_body;
        struct ptlrpc_request *req = mdt_info_req(info);
        struct md_attr        *ma = &info->mti_attr;
        struct lu_attr        *la = &ma->ma_attr;
        struct req_capsule    *pill = info->mti_pill;
        const struct lu_env   *env = info->mti_env;
        struct mdt_body       *repbody;
        struct lu_buf         *buffer = &info->mti_buf;
        int                    rc;
        int                    is_root;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
        ma->ma_valid = 0;

        rc = mdt_object_exists(o);
        if (rc < 0) {
                /* This object is located on a remote node. */
                repbody->fid1 = *mdt_object_fid(o);
                repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
                GOTO(out, rc = 0);
        }

        buffer->lb_len = reqbody->eadatasize;
        if (buffer->lb_len > 0)
                buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
        else
                buffer->lb_buf = NULL;

        /* If it is a dir object and the client requires MEA, then we get
         * the MEA */
        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_MEA) {
                /* Assumption: MDT_MD size is enough for lmv size. */
                ma->ma_lmv = buffer->lb_buf;
                ma->ma_lmv_size = buffer->lb_len;
                ma->ma_need = MA_LMV | MA_INODE;
        } else {
                ma->ma_lmm = buffer->lb_buf;
                ma->ma_lmm_size = buffer->lb_len;
                ma->ma_need = MA_LOV | MA_INODE;
        }

        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_FLDIREA &&
            lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
                /* get default stripe info for this dir. */
                ma->ma_need |= MA_LOV_DEF;
        }
        ma->ma_need |= ma_need;
        if (ma->ma_need & MA_SOM)
                ma->ma_som = &info->mti_u.som.data;

        rc = mdt_attr_get_complex(info, o, ma);
        if (unlikely(rc)) {
                CERROR("getattr error for "DFID": %d\n",
                       PFID(mdt_object_fid(o)), rc);
                RETURN(rc);
        }

        is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);

        /* the Lustre protocol requires returning the default striping
         * on the user-visible root if it is explicitly requested */
        if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
            (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
                struct lu_fid      rootfid;
                struct mdt_object *root;
                struct mdt_device *mdt = info->mti_mdt;

                rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
                if (rc)
                        RETURN(rc);
                root = mdt_object_find(env, mdt, &rootfid);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));
                rc = mdt_attr_get_lov(info, root, ma);
                mdt_object_put(info->mti_env, root);
                if (unlikely(rc)) {
                        CERROR("getattr error for "DFID": %d\n",
                               PFID(mdt_object_fid(o)), rc);
                        RETURN(rc);
                }
        }

        if (likely(ma->ma_valid & MA_INODE))
                mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
        else
                RETURN(-EFAULT);

        if (mdt_body_has_lov(la, reqbody)) {
                if (ma->ma_valid & MA_LOV) {
                        LASSERT(ma->ma_lmm_size);
                        mdt_dump_lmm(D_INFO, ma->ma_lmm);
                        repbody->eadatasize = ma->ma_lmm_size;
                        if (S_ISDIR(la->la_mode))
                                repbody->valid |= OBD_MD_FLDIREA;
                        else
                                repbody->valid |= OBD_MD_FLEASIZE;
                }
                if (ma->ma_valid & MA_LMV) {
                        LASSERT(S_ISDIR(la->la_mode));
                        repbody->eadatasize = ma->ma_lmv_size;
                        repbody->valid |= (OBD_MD_FLDIREA | OBD_MD_MEA);
                }
        } else if (S_ISLNK(la->la_mode) &&
                   reqbody->valid & OBD_MD_LINKNAME) {
                buffer->lb_buf = ma->ma_lmm;
                /* eadatasize from the client includes the NULL-terminator,
                 * so there is no need to read it */
                buffer->lb_len = reqbody->eadatasize - 1;
                rc = mo_readlink(env, next, buffer);
                if (unlikely(rc <= 0)) {
                        CERROR("readlink failed: %d\n", rc);
                        rc = -EFAULT;
                } else {
                        int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);

                        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
                                rc -= 2;
                        repbody->valid |= OBD_MD_LINKNAME;
                        /* we need to report back the size with the
                         * NULL-terminator because the client expects that */
                        repbody->eadatasize = rc + 1;
                        if (repbody->eadatasize != reqbody->eadatasize)
                                CERROR("Read shorter symlink %d, expected %d\n",
                                       rc, reqbody->eadatasize - 1);
                        /* NULL terminate */
                        ((char *)ma->ma_lmm)[rc] = 0;

                        /* If the total CDEBUG() size is larger than a page,
                         * it will print a warning to the console; avoid this
                         * by printing just the last part of the symlink. */
                        CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
                               print_limit < rc ? "..." : "", print_limit,
                               (char *)ma->ma_lmm + rc - print_limit, rc);
                        rc = 0;
                }
        }

        if (reqbody->valid & OBD_MD_FLMODEASIZE) {
                repbody->max_cookiesize = 0;
                repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
                repbody->valid |= OBD_MD_FLMODEASIZE;
                CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
                       "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
                       repbody->max_cookiesize);
        }

        if (exp_connect_rmtclient(info->mti_exp) &&
            reqbody->valid & OBD_MD_FLRMTPERM) {
                void *buf = req_capsule_server_get(pill, &RMF_ACL);

                /* mdt_getattr_lock only */
                rc = mdt_pack_remote_perm(info, o, buf);
                if (rc) {
                        repbody->valid &= ~OBD_MD_FLRMTPERM;
                        repbody->aclsize = 0;
                        RETURN(rc);
                } else {
                        repbody->valid |= OBD_MD_FLRMTPERM;
                        repbody->aclsize = sizeof(struct mdt_remote_perm);
                }
        }
#ifdef CONFIG_FS_POSIX_ACL
        else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
                 (reqbody->valid & OBD_MD_FLACL)) {
                buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
                buffer->lb_len = req_capsule_get_size(pill,
                                                      &RMF_ACL, RCL_SERVER);
                if (buffer->lb_len > 0) {
                        rc = mo_xattr_get(env, next, buffer,
                                          XATTR_NAME_ACL_ACCESS);
                        if (rc < 0) {
                                if (rc == -ENODATA) {
                                        repbody->aclsize = 0;
                                        repbody->valid |= OBD_MD_FLACL;
                                        rc = 0;
                                } else if (rc == -EOPNOTSUPP) {
                                        rc = 0;
                                } else {
                                        CERROR("got acl size: %d\n", rc);
                                }
                        } else {
                                repbody->aclsize = rc;
                                repbody->valid |= OBD_MD_FLACL;
                                rc = 0;
                        }
                }
        }
#endif

        if (reqbody->valid & OBD_MD_FLMDSCAPA &&
            info->mti_mdt->mdt_opts.mo_mds_capa &&
            info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
                struct lustre_capa *capa;

                capa = req_capsule_server_get(pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(env, next, capa, 0);
                if (rc)
                        RETURN(rc);
                repbody->valid |= OBD_MD_FLMDSCAPA;
        }

out:
        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_GETATTR);

        RETURN(rc);
}

static int mdt_renew_capa(struct mdt_thread_info *info)
{
        struct mdt_object  *obj = info->mti_object;
        struct mdt_body    *body;
        struct lustre_capa *capa, *c;
        int rc;
        ENTRY;

        /* if the object doesn't exist, or the server has disabled
         * capability, return directly; the client will find the
         * OBD_MD_FLOSSCAPA flag not set in body->valid, and will ask for
         * the capa in getattr */
        if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
            !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
                RETURN(0);

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(body != NULL);

        c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
        LASSERT(c);

        capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
        LASSERT(capa);

        *capa = *c;
        rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
        if (rc == 0)
                body->valid |= OBD_MD_FLOSSCAPA;
        RETURN(rc);
}

static int mdt_getattr(struct mdt_thread_info *info)
{
        struct mdt_object  *obj = info->mti_object;
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body    *reqbody;
        struct mdt_body    *repbody;
        mode_t              mode;
        int rc, rc2;
        ENTRY;

        reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
        LASSERT(reqbody != NULL);

        if (reqbody->valid & OBD_MD_FLOSSCAPA) {
                rc = req_capsule_server_pack(pill);
                if (unlikely(rc))
                        RETURN(err_serious(rc));
                rc = mdt_renew_capa(info);
                GOTO(out_shrink, rc);
        }

        LASSERT(obj != NULL);
        LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));

        mode = lu_object_attr(&obj->mot_obj.mo_lu);

        /* old clients may not report the needed easize, use the max value
         * then */
        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                             reqbody->eadatasize == 0 ?
                             info->mti_mdt->mdt_max_mdsize :
                             reqbody->eadatasize);

        rc = req_capsule_server_pack(pill);
        if (unlikely(rc != 0))
                RETURN(err_serious(rc));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        if (reqbody->valid & OBD_MD_FLRMTPERM)
                rc = mdt_init_ucred(info, reqbody);
        else
                rc = mdt_check_ucred(info);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);

        /*
         * Don't check the capability at all, because rename might getattr
         * for a remote obj, and at that time no capability is available.
         */
        mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, obj, 0);
        if (reqbody->valid & OBD_MD_FLRMTPERM)
                mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}
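
/*
 * MDS_IS_SUBDIR handler: checks the ancestry relation between the object
 * this request was sent against and the directory in body->fid2; rename
 * uses this, e.g., to avoid creating a loop in the directory tree.
 */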
static int mdt_is_subdir(struct mdt_thread_info *info)
{
        struct mdt_object     *o = info->mti_object;
        struct req_capsule    *pill = info->mti_pill;
        const struct mdt_body *body = info->mti_body;
        struct mdt_body       *repbody;
        int rc;
        ENTRY;

        LASSERT(o != NULL);

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        /*
         * We save the last checked parent fid to @repbody->fid1 for remote
         * directory cases.
         */
        LASSERT(fid_is_sane(&body->fid2));
        LASSERT(mdt_object_exists(o) > 0);
        rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
                           &body->fid2, &repbody->fid1);
        if (rc == 0 || rc == -EREMOTE)
                repbody->valid |= OBD_MD_FLID;

        RETURN(rc);
}

static int mdt_raw_lookup(struct mdt_thread_info *info,
                          struct mdt_object *parent,
                          const struct lu_name *lname,
                          struct ldlm_reply *ldlm_rep)
{
        struct md_object      *next = mdt_object_child(info->mti_object);
        const struct mdt_body *reqbody = info->mti_body;
        struct lu_fid         *child_fid = &info->mti_tmp_fid1;
        struct mdt_body       *repbody;
        int rc;
        ENTRY;

        if (reqbody->valid != OBD_MD_FLID)
                RETURN(0);

        LASSERT(!info->mti_cross_ref);

        /* Only get the fid of this obj by name */
        fid_zero(child_fid);
        rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                        &info->mti_spec);
#if 0
        /* XXX is raw_lookup possible as intent operation? */
        if (rc != 0) {
                if (rc == -ENOENT)
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
                RETURN(rc);
        } else
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
#endif
        if (rc == 0) {
                repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
                repbody->fid1 = *child_fid;
                repbody->valid = OBD_MD_FLID;
        }
        RETURN(1);
}

/*
 * UPDATE lock should be taken against the parent, and be released before
 * exit; the child_bits lock should be taken against the child, and be
 * returned back:
 * (1) a normal request should release the child lock;
 * (2) an intent request will grant the lock to the client.
 */
static int mdt_getattr_name_lock(struct mdt_thread_info *info,
                                 struct mdt_lock_handle *lhc,
                                 __u64 child_bits,
                                 struct ldlm_reply *ldlm_rep)
{
        struct ptlrpc_request  *req = mdt_info_req(info);
        struct mdt_body        *reqbody = NULL;
        struct mdt_object      *parent = info->mti_object;
        struct mdt_object      *child;
        struct md_object       *next = mdt_object_child(parent);
        struct lu_fid          *child_fid = &info->mti_tmp_fid1;
        struct lu_name         *lname = NULL;
        const char             *name = NULL;
        int                     namelen = 0;
        struct mdt_lock_handle *lhp = NULL;
        struct ldlm_lock       *lock;
        struct ldlm_res_id     *res_id;
        int                     is_resent;
        int                     ma_need = 0;
        int                     rc;
        ENTRY;

        is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
        LASSERT(ergo(is_resent,
                     lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));

        LASSERT(parent != NULL);
        name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
        if (name == NULL)
                RETURN(err_serious(-EFAULT));

        namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
                                       RCL_CLIENT) - 1;
        if (!info->mti_cross_ref) {
                /*
                 * XXX: Check for "namelen == 0" is for getattr by fid
                 * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty
                 * name, that is, the name must contain at least one
                 * character and the terminating '\0'.
                 */
                if (namelen == 0) {
                        reqbody = req_capsule_client_get(info->mti_pill,
                                                         &RMF_MDT_BODY);
                        if (unlikely(reqbody == NULL))
                                RETURN(err_serious(-EFAULT));

                        if (unlikely(!fid_is_sane(&reqbody->fid2)))
                                RETURN(err_serious(-EINVAL));

                        name = NULL;
                        CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
                               "ldlm_rep = %p\n",
                               PFID(mdt_object_fid(parent)),
                               PFID(&reqbody->fid2), ldlm_rep);
                } else {
                        lname = mdt_name(info->mti_env, (char *)name, namelen);
                        CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
                               "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
                               name, ldlm_rep);
                }
        }
        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);

        rc = mdt_object_exists(parent);
        if (unlikely(rc == 0)) {
                LU_OBJECT_DEBUG(D_INODE, info->mti_env,
                                &parent->mot_obj.mo_lu,
                                "Parent doesn't exist!\n");
                RETURN(-ESTALE);
        } else if (!info->mti_cross_ref) {
                LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
                         PFID(mdt_object_fid(parent)));
        }
        if (lname) {
                rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
                if (rc != 0) {
                        if (rc > 0)
                                rc = 0;
                        RETURN(rc);
                }
        }

        if (info->mti_cross_ref) {
                /* Only getattr on the child. Parent is on another node. */
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
                child = parent;
                CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
                       "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);

                if (is_resent) {
                        /* Do not take lock for resent case. */
                        lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                        LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                                 lhc->mlh_reg_lh.cookie);
                        LASSERT(fid_res_name_eq(mdt_object_fid(child),
                                                &lock->l_resource->lr_name));
                        LDLM_LOCK_PUT(lock);
                        rc = 0;
                } else {
                        mdt_lock_handle_init(lhc);
                        mdt_lock_reg_init(lhc, LCK_PR);

                        /*
                         * Object's name is on another MDS, no lookup lock is
                         * needed here but update is.
                         */
                        child_bits &= ~MDS_INODELOCK_LOOKUP;
                        child_bits |= MDS_INODELOCK_UPDATE;

                        rc = mdt_object_lock(info, child, lhc, child_bits,
                                             MDT_LOCAL_LOCK);
                }
                if (rc == 0) {
                        /* Finally, we can get attr for child. */
                        mdt_set_capainfo(info, 0, mdt_object_fid(child),
                                         BYPASS_CAPA);
                        rc = mdt_getattr_internal(info, child, 0);
                        if (unlikely(rc != 0))
                                mdt_object_unlock(info, child, lhc, 1);
                }
                RETURN(rc);
        }

        if (lname) {
                /* step 1: lock parent only if parent is a directory */
                if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
                        lhp = &info->mti_lh[MDT_LH_PARENT];
                        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
                        rc = mdt_object_lock(info, parent, lhp,
                                             MDS_INODELOCK_UPDATE,
                                             MDT_LOCAL_LOCK);
                        if (unlikely(rc != 0))
                                RETURN(rc);
                }

                /* step 2: lookup child's fid by name */
                fid_zero(child_fid);
                rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                                &info->mti_spec);
                if (rc != 0) {
                        if (rc == -ENOENT)
                                mdt_set_disposition(info, ldlm_rep,
                                                    DISP_LOOKUP_NEG);
                        GOTO(out_parent, rc);
                } else
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        } else {
                *child_fid = reqbody->fid2;
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        }

        /*
         * step 3: find the child object by fid & lock it,
         * regardless of whether it is local or remote.
         */
        child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);

        if (unlikely(IS_ERR(child)))
                GOTO(out_parent, rc = PTR_ERR(child));
        if (is_resent) {
                /* Do not take lock for resent case. */
                lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                         lhc->mlh_reg_lh.cookie);

                res_id = &lock->l_resource->lr_name;
                if (!fid_res_name_eq(mdt_object_fid(child),
                                     &lock->l_resource->lr_name)) {
                        LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
                                                 &lock->l_resource->lr_name),
                                 "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                                 (unsigned long)res_id->name[0],
                                 (unsigned long)res_id->name[1],
                                 (unsigned long)res_id->name[2],
                                 PFID(mdt_object_fid(parent)));
                        CWARN("Although resent, still did not get child lock "
                              "parent:"DFID" child:"DFID"\n",
                              PFID(mdt_object_fid(parent)),
                              PFID(mdt_object_fid(child)));
                        lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
                        LDLM_LOCK_PUT(lock);
                        GOTO(relock, 0);
                }
                LDLM_LOCK_PUT(lock);
        } else {
relock:
                OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout * 2);
                mdt_lock_handle_init(lhc);
                if (child_bits == MDS_INODELOCK_LAYOUT)
                        mdt_lock_reg_init(lhc, LCK_CR);
                else
                        mdt_lock_reg_init(lhc, LCK_PR);

                if (mdt_object_exists(child) == 0) {
                        LU_OBJECT_DEBUG(D_INODE, info->mti_env,
                                        &child->mot_obj.mo_lu,
                                        "Object doesn't exist!\n");
                        GOTO(out_child, rc = -ENOENT);
                }

                if (!(child_bits & MDS_INODELOCK_UPDATE)) {
                        struct md_attr *ma = &info->mti_attr;

                        ma->ma_valid = 0;
                        ma->ma_need = MA_INODE;
                        rc = mdt_attr_get_complex(info, child, ma);
                        if (unlikely(rc != 0))
                                GOTO(out_child, rc);

                        /* layout lock is used only on regular files */
                        if ((ma->ma_valid & MA_INODE) &&
                            (ma->ma_attr.la_valid & LA_MODE) &&
                            !S_ISREG(ma->ma_attr.la_mode))
                                child_bits &= ~MDS_INODELOCK_LAYOUT;

                        /* If the file has not been changed for some time, we
                         * return not only a LOOKUP lock, but also an UPDATE
                         * lock, and this might save us an RPC on a later
                         * STAT. For directories, it also lets negative
                         * dentries start working for this dir. */
                        if (ma->ma_valid & MA_INODE &&
                            ma->ma_attr.la_valid & LA_CTIME &&
                            info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
                            ma->ma_attr.la_ctime < cfs_time_current_sec())
                                child_bits |= MDS_INODELOCK_UPDATE;
                }

                rc = mdt_object_lock(info, child, lhc, child_bits,
                                     MDT_CROSS_LOCK);

                if (unlikely(rc != 0))
                        GOTO(out_child, rc);
        }

        lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
        /* Get MA_SOM attributes if the update lock is given. */
        if (lock != NULL &&
            lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
            S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
                ma_need = MA_SOM;

        /* finally, we can get attr for child. */
        mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, child, ma_need);
        if (unlikely(rc != 0)) {
                mdt_object_unlock(info, child, lhc, 1);
        } else if (lock) {
                /* Debugging code. */
                res_id = &lock->l_resource->lr_name;
                LDLM_DEBUG(lock, "Returning lock to client");
                LASSERTF(fid_res_name_eq(mdt_object_fid(child),
                                         &lock->l_resource->lr_name),
                         "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                         (unsigned long)res_id->name[0],
                         (unsigned long)res_id->name[1],
                         (unsigned long)res_id->name[2],
                         PFID(mdt_object_fid(child)));
                mdt_pack_size2body(info, child);
        }
        if (lock)
                LDLM_LOCK_PUT(lock);

        EXIT;
out_child:
        mdt_object_put(info->mti_env, child);
out_parent:
        if (lhp)
                mdt_object_unlock(info, parent, lhp, 1);
        return rc;
}

/* normal handler: should release the child lock */
static int mdt_getattr_name(struct mdt_thread_info *info)
{
        struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
        struct mdt_body        *reqbody;
        struct mdt_body        *repbody;
        int rc, rc2;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(reqbody != NULL);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        rc = mdt_init_ucred(info, reqbody);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
        if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
                ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
                lhc->mlh_reg_lh.cookie = 0;
        }
        mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}

static const struct lu_device_operations mdt_lu_ops;

static int lu_device_is_mdt(struct lu_device *d)
{
        return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
}

static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                         void *karg, void *uarg);

static int mdt_set_info(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        char *key;
        void *val;
        int keylen, vallen, rc = 0;
        ENTRY;

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(rc);

        key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info key");
                RETURN(-EFAULT);
        }

        keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);

        val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info val");
                RETURN(-EFAULT);
        }

        vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
                                      RCL_CLIENT);

        /* Swab any part of val you need to here */
        if (KEY_IS(KEY_READ_ONLY)) {
                req->rq_status = 0;
                lustre_msg_set_status(req->rq_repmsg, 0);

                cfs_spin_lock(&req->rq_export->exp_lock);
                if (*(__u32 *)val)
                        req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
                else
                        req->rq_export->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
                cfs_spin_unlock(&req->rq_export->exp_lock);

        } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
                struct changelog_setinfo *cs =
                        (struct changelog_setinfo *)val;

                if (vallen != sizeof(*cs)) {
                        CERROR("Bad changelog_clear setinfo size %d\n", vallen);
                        RETURN(-EINVAL);
                }
                if (ptlrpc_req_need_swab(req)) {
                        __swab64s(&cs->cs_recno);
                        __swab32s(&cs->cs_id);
                }

                rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
                                   vallen, val, NULL);
                lustre_msg_set_status(req->rq_repmsg, rc);
        } else {
                RETURN(-EINVAL);
        }
        RETURN(0);
}

static int mdt_connect(struct mdt_thread_info *info)
{
        int rc;
        struct ptlrpc_request *req;

        req = mdt_info_req(info);
        rc = target_handle_connect(req);
        if (rc == 0) {
                LASSERT(req->rq_export != NULL);
                info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
                rc = mdt_init_sec_level(info);
                if (rc == 0)
                        rc = mdt_init_idmap(info);
                if (rc != 0)
                        obd_disconnect(class_export_get(req->rq_export));
        } else {
                rc = err_serious(rc);
        }
        return rc;
}

static int mdt_disconnect(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        rc = target_handle_disconnect(mdt_info_req(info));
        if (rc)
                rc = err_serious(rc);
        RETURN(rc);
}

static int mdt_sendpage(struct mdt_thread_info *info,
                        struct lu_rdpg *rdpg, int nob)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
        int                      tmpcount;
        int                      tmpsize;
        int                      i;
        int                      rc;
        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
                                    MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(-ENOMEM);

        if (!(exp->exp_connect_flags & OBD_CONNECT_BRW_SIZE))
                /* an old client requires the reply size in units of its
                 * PAGE_SIZE, which is rdpg->rp_count */
                nob = rdpg->rp_count;

        for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
             i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
                ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
        }

        LASSERT(desc->bd_nob == nob);
        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk_pin(desc);
        RETURN(rc);
}

#ifdef HAVE_SPLIT_SUPPORT
/*
 * Retrieve dir entries from the page and insert them into the slave object.
 * Really this belongs in the osd layer, but since it will not be in the
 * final product, just do it here and do not define more moo APIs for it.
 */
static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
                              int size)
{
        struct mdt_object *object = info->mti_object;
        struct lu_fid     *lf = &info->mti_tmp_fid2;
        struct md_attr    *ma = &info->mti_attr;
        struct lu_dirpage *dp;
        struct lu_dirent  *ent;
        int rc = 0, offset = 0;
        ENTRY;

        /* Make sure we have at least one entry. */
        if (size == 0)
                RETURN(-EINVAL);

        /*
         * Disable trans for this name insert, since it would otherwise
         * include many trans for this.
         */
        info->mti_no_need_trans = 1;
        /*
         * When writing a dir page, there is no need to update the parent's
         * ctime, and no permission check for name_insert.
         */
        ma->ma_attr.la_ctime = 0;
        ma->ma_attr.la_valid = LA_MODE;
        ma->ma_valid = MA_INODE;

        dp = page_address(page);
        offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);

        for (ent = lu_dirent_start(dp); ent != NULL;
             ent = lu_dirent_next(ent)) {
                struct lu_name *lname;
                char *name;

                if (le16_to_cpu(ent->lde_namelen) == 0)
                        continue;

                fid_le_to_cpu(lf, &ent->lde_fid);
                if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
                        ma->ma_attr.la_mode = S_IFDIR;
                else
                        ma->ma_attr.la_mode = 0;
                OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
                if (name == NULL)
                        GOTO(out, rc = -ENOMEM);

                memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
                lname = mdt_name(info->mti_env, name,
                                 le16_to_cpu(ent->lde_namelen));
                ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
                rc = mdo_name_insert(info->mti_env,
                                     md_object_next(&object->mot_obj),
                                     lname, lf, ma);
                OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
                if (rc) {
                        CERROR("Can't insert %*.*s, rc %d\n",
                               le16_to_cpu(ent->lde_namelen),
                               le16_to_cpu(ent->lde_namelen),
                               ent->lde_name, rc);
                        GOTO(out, rc);
                }

                offset += lu_dirent_size(ent);
        }
out:
        RETURN(rc);
}

static int mdt_bulk_timeout(void *data)
{
        ENTRY;

        CERROR("mdt bulk transfer timeout\n");

        RETURN(1);
}

static int mdt_writepage(struct mdt_thread_info *info)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct mdt_body         *reqbody;
        struct l_wait_info      *lwi;
        struct ptlrpc_bulk_desc *desc;
        struct page             *page;
        int                      rc;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL)
                RETURN(err_serious(-EFAULT));

        desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(err_serious(-ENOMEM));

        /* allocate the page for the desc */
        page = cfs_alloc_page(CFS_ALLOC_STD);
        if (page == NULL)
                GOTO(desc_cleanup, rc = -ENOMEM);

        CDEBUG(D_INFO, "Received page offset %d size %d\n",
               (int)reqbody->size, (int)reqbody->nlink);

        ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
                              (int)reqbody->nlink);

        rc = sptlrpc_svc_prep_bulk(req, desc);
        if (rc != 0)
                GOTO(cleanup_page, rc);
        /*
         * Check if the client was evicted while we were doing i/o before
         * touching the network.
         */
        OBD_ALLOC_PTR(lwi);
        if (lwi == NULL)
                GOTO(cleanup_page, rc = -ENOMEM);

        if (desc->bd_export->exp_failed)
                rc = -ENOTCONN;
        else
                rc = ptlrpc_start_bulk_transfer(desc);
        if (rc == 0) {
                *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
                                            mdt_bulk_timeout, desc);
                rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
                                  desc->bd_export->exp_failed, lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);
                if (rc == -ETIMEDOUT) {
                        DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
                        ptlrpc_abort_bulk(desc);
                } else if (desc->bd_export->exp_failed) {
                        DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
                        rc = -ENOTCONN;
                        ptlrpc_abort_bulk(desc);
                } else if (!desc->bd_success ||
                           desc->bd_nob_transferred != desc->bd_nob) {
                        DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
                                  desc->bd_success ?
                                  "truncated" : "network error on",
                                  desc->bd_nob_transferred, desc->bd_nob);
                        /* XXX should this be a different errno? */
                        rc = -ETIMEDOUT;
                }
        } else {
                DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
        }
        if (rc)
                GOTO(cleanup_lwi, rc);
        rc = mdt_write_dir_page(info, page, reqbody->nlink);

cleanup_lwi:
        OBD_FREE_PTR(lwi);
cleanup_page:
        cfs_free_page(page);
desc_cleanup:
        ptlrpc_free_bulk_pin(desc);
        RETURN(rc);
}
#endif

static int mdt_readpage(struct mdt_thread_info *info)
{
        struct mdt_object *object = info->mti_object;
        struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct mdt_body   *reqbody;
        struct mdt_body   *repbody;
        int                rc;
        int                i;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
                RETURN(err_serious(-ENOMEM));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL || repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /*
         * Prepare @rdpg before calling the lower layers and the transfer
         * itself. Here reqbody->size contains the offset of where to start
         * to read and reqbody->nlink contains the number of bytes to read.
         */
        rdpg->rp_hash = reqbody->size;
        if (rdpg->rp_hash != reqbody->size) {
                CERROR("Invalid hash: "LPX64" != "LPX64"\n",
                       rdpg->rp_hash, reqbody->size);
                RETURN(-EFAULT);
        }

        rdpg->rp_attrs = reqbody->mode;
        if (info->mti_exp->exp_connect_flags & OBD_CONNECT_64BITHASH)
                rdpg->rp_attrs |= LUDA_64BITHASH;
        rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
                                PTLRPC_MAX_BRW_SIZE);
        rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
                          CFS_PAGE_SHIFT;
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
        if (rdpg->rp_pages == NULL)
                RETURN(-ENOMEM);

        for (i = 0; i < rdpg->rp_npages; ++i) {
                rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(free_rdpg, rc = -ENOMEM);
        }

        /* call lower layers to fill allocated pages with directory data */
        rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
        if (rc < 0)
                GOTO(free_rdpg, rc);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg, rc);

        EXIT;
free_rdpg:

        for (i = 0; i < rdpg->rp_npages; i++)
                if (rdpg->rp_pages[i] != NULL)
                        cfs_free_page(rdpg->rp_pages[i]);
        OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                RETURN(0);

        return rc;
}

static int mdt_reint_internal(struct mdt_thread_info *info,
                              struct mdt_lock_handle *lhc,
                              __u32 op)
{
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body    *repbody;
        int                 rc = 0, rc2;
        ENTRY;

        rc = mdt_reint_unpack(info, op);
        if (rc != 0) {
                CERROR("Can't unpack reint, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        /* for replay (no_create) lmm is not needed, client has it already */
        if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                                     info->mti_rr.rr_eadatalen);

        /* llog cookies are always 0, the field is kept for compatibility */
        if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);

        rc = req_capsule_server_pack(pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
                repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
                LASSERT(repbody);
                repbody->eadatasize = 0;
                repbody->aclsize = 0;
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);

        /* for replay neither cookie nor lmm is needed, because the client
         * has them already */
        if (info->mti_spec.no_create)
                if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);

        rc = mdt_init_ucred_reint(info);
        if (rc)
                GOTO(out_shrink, rc);

        rc = mdt_fix_attr_ucred(info, op);
        if (rc != 0)
                GOTO(out_ucred, rc = err_serious(rc));

        if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
                rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
                GOTO(out_ucred, rc);
        }
        rc = mdt_reint_rec(info, lhc);
        EXIT;
out_ucred:
        mdt_exit_ucred(info);
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}

static long mdt_reint_opcode(struct mdt_thread_info *info,
                             const struct req_format **fmt)
{
        struct mdt_rec_reint *rec;
        long opc;

        opc = err_serious(-EFAULT);
        rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
        if (rec != NULL) {
                opc = rec->rr_opcode;
                DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
                if (opc < REINT_MAX && fmt[opc] != NULL)
                        req_capsule_extend(info->mti_pill, fmt[opc]);
                else {
                        CERROR("Unsupported opc: %ld\n", opc);
                        opc = err_serious(opc);
                }
        }
        return opc;
}

static int mdt_reint(struct mdt_thread_info *info)
{
        long opc;
        int  rc;

        static const struct req_format *reint_fmts[REINT_MAX] = {
                [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
                [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
                [REINT_LINK]     = &RQF_MDS_REINT_LINK,
                [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
                [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
                [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
                [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
        };

        ENTRY;

        opc = mdt_reint_opcode(info, reint_fmts);
        if (opc >= 0) {
                /*
                 * No lock possible here from the client to pass it to the
                 * reint code path.
                 */
                rc = mdt_reint_internal(info, NULL, opc);
        } else {
                rc = opc;
        }

        info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
        RETURN(rc);
}

/* this should sync the whole device */
static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
{
        struct dt_device *dt = mdt->mdt_bottom;
        int rc;
        ENTRY;

        rc = dt->dd_ops->dt_sync(env, dt);
        RETURN(rc);
}

/* this should sync this object */
static int mdt_object_sync(struct mdt_thread_info *info)
{
        struct md_object *next;
        int rc;
        ENTRY;

        if (!mdt_object_exists(info->mti_object)) {
                CWARN("Non-existent object "DFID"!\n",
                      PFID(mdt_object_fid(info->mti_object)));
                RETURN(-ESTALE);
        }
        next = mdt_object_child(info->mti_object);
        rc = mo_object_sync(info->mti_env, next);

        RETURN(rc);
}

static int mdt_sync(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct req_capsule    *pill = info->mti_pill;
        struct mdt_body       *body;
        int                    rc;
        ENTRY;

        /* The fid may be zero, so we req_capsule_set manually */
        req_capsule_set(pill, &RQF_MDS_SYNC);

        body = req_capsule_client_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                RETURN(err_serious(-EINVAL));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
                RETURN(err_serious(-ENOMEM));

        if (fid_seq(&body->fid1) == 0) {
                /* sync the whole device */
                rc = req_capsule_server_pack(pill);
                if (rc == 0)
                        rc = mdt_device_sync(info->mti_env, info->mti_mdt);
                else
                        rc = err_serious(rc);
        } else {
                /* sync an object */
                rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
                if (rc == 0) {
                        rc = mdt_object_sync(info);
                        if (rc == 0) {
                                const struct lu_fid *fid;
                                struct lu_attr *la = &info->mti_attr.ma_attr;

                                info->mti_attr.ma_need = MA_INODE;
                                info->mti_attr.ma_valid = 0;
                                rc = mdt_attr_get_complex(info, info->mti_object,
                                                          &info->mti_attr);
                                if (rc == 0) {
                                        body = req_capsule_server_get(pill,
                                                                &RMF_MDT_BODY);
                                        fid = mdt_object_fid(info->mti_object);
                                        mdt_pack_attr2body(info, body, la, fid);
                                }
                        }
                } else
                        rc = err_serious(rc);
        }
        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_SYNC);

        RETURN(rc);
}

/**
 * Quotacheck handler.
 * in-kernel quotacheck isn't supported any more.
 */
static int mdt_quotacheck(struct mdt_thread_info *info)
{
        struct obd_quotactl *oqctl;
        int                  rc;
        ENTRY;

        oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(err_serious(rc));

        /* deprecated, not used any more */
        RETURN(-EOPNOTSUPP);
}

/**
 * Handle quota control requests to consult the current usage/limit, but
 * also to configure quota enforcement.
 */
static int mdt_quotactl(struct mdt_thread_info *info)
{
        struct obd_export   *exp = info->mti_exp;
        struct req_capsule  *pill = info->mti_pill;
        struct obd_quotactl *oqctl, *repoqc;
        int                  id, rc;
        struct lu_device    *qmt = info->mti_mdt->mdt_qmt_dev;
        ENTRY;

        oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(pill);
        if (rc)
                RETURN(err_serious(rc));

        switch (oqctl->qc_cmd) {
        case Q_QUOTACHECK:
        case LUSTRE_Q_INVALIDATE:
        case LUSTRE_Q_FINVALIDATE:
        case Q_QUOTAON:
        case Q_QUOTAOFF:
        case Q_INITQUOTA:
                /* deprecated, not used any more */
                RETURN(-EOPNOTSUPP);
        /* master quotactl */
        case Q_GETINFO:
        case Q_SETINFO:
        case Q_SETQUOTA:
        case Q_GETQUOTA:
                if (qmt == NULL)
                        RETURN(-EOPNOTSUPP);
        /* slave quotactl */
        case Q_GETOINFO:
        case Q_GETOQUOTA:
                break;
        default:
                CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        /* map uid/gid for remote client */
        id = oqctl->qc_id;
        if (exp_connect_rmtclient(exp)) {
                struct lustre_idmap_table *idmap;

                idmap = mdt_req2med(mdt_info_req(info))->med_idmap;

                if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
                             oqctl->qc_cmd != Q_GETINFO))
                        RETURN(-EPERM);

                if (oqctl->qc_type == USRQUOTA)
                        id = lustre_idmap_lookup_uid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else if (oqctl->qc_type == GRPQUOTA)
                        id = lustre_idmap_lookup_gid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else
                        RETURN(-EINVAL);

                if (id == CFS_IDMAP_NOTFOUND) {
                        CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
                        RETURN(-EACCES);
                }
        }

        repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
        if (repoqc == NULL)
                RETURN(err_serious(-EFAULT));
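
        /*
         * For a remote client the quota id in the request is expressed in
         * the client's id space and was translated to a local id above; the
         * swap below (undone again after the call) lets the local id drive
         * the actual quota operation while the reply keeps the client's
         * view of the id.
         */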
        if (oqctl->qc_id != id)
                swap(oqctl->qc_id, id);

        switch (oqctl->qc_cmd) {
        case Q_GETINFO:
        case Q_SETINFO:
        case Q_SETQUOTA:
        case Q_GETQUOTA:
                /* forward quotactl request to QMT */
                rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
                break;

        case Q_GETOINFO:
        case Q_GETOQUOTA:
                /* slave quotactl */
                rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
                                   oqctl);
                break;

        default:
                CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        if (oqctl->qc_id != id)
                swap(oqctl->qc_id, id);

        *repoqc = *oqctl;
        RETURN(rc);
}

/*
 * OBD PING and other handlers.
 */
static int mdt_obd_ping(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        req_capsule_set(info->mti_pill, &RQF_OBD_PING);

        rc = target_handle_ping(mdt_info_req(info));
        if (rc < 0)
                rc = err_serious(rc);
        RETURN(rc);
}

/*
 * OBD_IDX_READ handler
 */
static int mdt_obd_idx_read(struct mdt_thread_info *info)
{
        struct mdt_device *mdt = info->mti_mdt;
        struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct idx_info   *req_ii, *rep_ii;
        int rc, i;
        ENTRY;

        memset(rdpg, 0, sizeof(*rdpg));
        req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);

        /* extract idx_info buffer from request & reply */
        req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
        if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(err_serious(rc));

        rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
        if (rep_ii == NULL)
                RETURN(err_serious(-EFAULT));
        rep_ii->ii_magic = IDX_INFO_MAGIC;

        /* extract hash to start with */
        rdpg->rp_hash = req_ii->ii_hash_start;

        /* extract requested attributes */
        rdpg->rp_attrs = req_ii->ii_attrs;

        /* check that the fid packed in the request is valid and supported */
        if (!fid_is_sane(&req_ii->ii_fid))
                RETURN(-EINVAL);
        rep_ii->ii_fid = req_ii->ii_fid;

        /* copy flags */
        rep_ii->ii_flags = req_ii->ii_flags;

        /* compute the number of pages to allocate, ii_count is the number
         * of 4KB containers */
        if (req_ii->ii_count <= 0)
                GOTO(out, rc = -EFAULT);
        rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
                               PTLRPC_MAX_BRW_SIZE);
        rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        /* allocate pages to store the containers */
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        if (rdpg->rp_pages == NULL)
                GOTO(out, rc = -ENOMEM);
        for (i = 0; i < rdpg->rp_npages; i++) {
                rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(out, rc = -ENOMEM);
        }

        /* populate pages with key/record pairs */
        rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
        if (rc < 0)
                GOTO(out, rc);

        LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
                 "asked %d > %d\n", rc, rdpg->rp_count);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg, rc);

        GOTO(out, rc);
out:
        if (rdpg->rp_pages) {
                for (i = 0; i < rdpg->rp_npages; i++)
                        if (rdpg->rp_pages[i])
                                cfs_free_page(rdpg->rp_pages[i]);
                OBD_FREE(rdpg->rp_pages,
                         rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        }
        return rc;
}

static int mdt_obd_log_cancel(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}

static int mdt_obd_qc_callback(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}
2264 /** clone llog ctxt from child (mdd)
2265 * This allows remote llog (replicator) access.
2266 * We can either pass all llog RPCs (eg mdt_llog_create) on to child where the
2267 * context was originally set up, or we can handle them directly.
2268 * I choose the latter, but that means I need any llog
2269 * contexts set up by child to be accessable by the mdt. So we clone the
2270 * context into our context list here.
2272 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2275 struct md_device *next = mdt->mdt_child;
2276 struct llog_ctxt *ctxt;
2279 if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2282 rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2283 if (rc || ctxt == NULL) {
2287 rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2289 CERROR("Can't set mdt ctxt %d\n", rc);
2294 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2295 struct mdt_device *mdt, int idx)
2297 struct llog_ctxt *ctxt;
2299 ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2302 /* Put once for the get we just did, and once for the clone */
2303 llog_ctxt_put(ctxt);
2304 llog_ctxt_put(ctxt);
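/*
 * A minimal pairing sketch (illustration only, using the changelog
 * context index as an example; the real setup/cleanup paths live
 * elsewhere): a context cloned at setup time must be uncloned on
 * cleanup, otherwise the reference taken by the clone is leaked.
 */
#if 0
static int example_llog_setup(const struct lu_env *env,
			      struct mdt_device *mdt)
{
	int rc;

	rc = mdt_llog_ctxt_clone(env, mdt, LLOG_CHANGELOG_ORIG_CTXT);
	if (rc != 0)
		return rc;
	/* ... serve llog RPCs through the handlers below ... */
	mdt_llog_ctxt_unclone(env, mdt, LLOG_CHANGELOG_ORIG_CTXT);
	return 0;
}
#endif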
2308 static int mdt_llog_create(struct mdt_thread_info *info)
2312 req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2313 rc = llog_origin_handle_open(mdt_info_req(info));
2314 return (rc < 0 ? err_serious(rc) : rc);
2317 static int mdt_llog_destroy(struct mdt_thread_info *info)
2321 req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2322 rc = llog_origin_handle_destroy(mdt_info_req(info));
2323 return (rc < 0 ? err_serious(rc) : rc);
2326 static int mdt_llog_read_header(struct mdt_thread_info *info)
2330 req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2331 rc = llog_origin_handle_read_header(mdt_info_req(info));
2332 return (rc < 0 ? err_serious(rc) : rc);
2335 static int mdt_llog_next_block(struct mdt_thread_info *info)
2339 req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2340 rc = llog_origin_handle_next_block(mdt_info_req(info));
2341 return (rc < 0 ? err_serious(rc) : rc);
2344 static int mdt_llog_prev_block(struct mdt_thread_info *info)
2348 req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2349 rc = llog_origin_handle_prev_block(mdt_info_req(info));
2350 return (rc < 0 ? err_serious(rc) : rc);
2357 static struct ldlm_callback_suite cbs = {
2358 .lcs_completion = ldlm_server_completion_ast,
2359 .lcs_blocking = ldlm_server_blocking_ast,
2360 .lcs_glimpse = ldlm_server_glimpse_ast
2363 static int mdt_enqueue(struct mdt_thread_info *info)
2365 struct ptlrpc_request *req;
2369 * info->mti_dlm_req already contains swapped and (if necessary)
2370 * converted dlm request.
2372 LASSERT(info->mti_dlm_req != NULL);
2374 req = mdt_info_req(info);
2375 rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2376 req, info->mti_dlm_req, &cbs);
2377 info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2378 return rc ? err_serious(rc) : req->rq_status;
2381 static int mdt_convert(struct mdt_thread_info *info)
2384 struct ptlrpc_request *req;
2386 LASSERT(info->mti_dlm_req);
2387 req = mdt_info_req(info);
2388 rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2389 return rc ? err_serious(rc) : req->rq_status;
2392 static int mdt_bl_callback(struct mdt_thread_info *info)
2394 CERROR("bl callbacks should not happen on MDS\n");
2396 return err_serious(-EOPNOTSUPP);
2399 static int mdt_cp_callback(struct mdt_thread_info *info)
2401 CERROR("cp callbacks should not happen on MDS\n");
2403 return err_serious(-EOPNOTSUPP);
2407 * sec context handlers
2409 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2413 rc = mdt_handle_idmap(info);
2416 struct ptlrpc_request *req = mdt_info_req(info);
2419 opc = lustre_msg_get_opc(req->rq_reqmsg);
2420 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2421 sptlrpc_svc_ctx_invalidate(req);
2424 CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2430 * quota request handlers
2432 static int mdt_quota_dqacq(struct mdt_thread_info *info)
2434 struct lu_device *qmt = info->mti_mdt->mdt_qmt_dev;
2439 RETURN(err_serious(-EOPNOTSUPP));
2441 rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2445 static struct mdt_object *mdt_obj(struct lu_object *o)
2447 LASSERT(lu_device_is_mdt(o->lo_dev));
2448 return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2451 struct mdt_object *mdt_object_new(const struct lu_env *env,
2452 struct mdt_device *d,
2453 const struct lu_fid *f)
2455 struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2456 struct lu_object *o;
2457 struct mdt_object *m;
2460 CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2461 o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2462 if (unlikely(IS_ERR(o)))
2463 m = (struct mdt_object *)o;
2469 struct mdt_object *mdt_object_find(const struct lu_env *env,
2470 struct mdt_device *d,
2471 const struct lu_fid *f)
2473 struct lu_object *o;
2474 struct mdt_object *m;
2477 CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2478 o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2479 if (unlikely(IS_ERR(o)))
2480 m = (struct mdt_object *)o;
2487 * Asynchronous commit for mdt device.
2489 * Pass the asynchronous commit call down the MDS stack.
2491 * \param env environment
2492 * \param mdt the mdt device
2494 static void mdt_device_commit_async(const struct lu_env *env,
2495 struct mdt_device *mdt)
2497 struct dt_device *dt = mdt->mdt_bottom;
2500 rc = dt->dd_ops->dt_commit_async(env, dt);
2501 if (unlikely(rc != 0))
2502 CWARN("async commit start failed with rc = %d\n", rc);
2506 * Mark the lock as "synchronous".
2508 * Mark the lock to defer transaction commit to unlock time.
2510 * \param lock the lock to mark as "synchronous"
2512 * \see mdt_is_lock_sync
2513 * \see mdt_save_lock
2515 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2517 lock->l_ast_data = (void*)1;
2521 * Check whether the lock is "synchronous" or not.
2523 * \param lock the lock to check
2524 * \retval 1 the lock is "synchronous"
2525 * \retval 0 the lock isn't "synchronous"
2527 * \see mdt_set_lock_sync
2528 * \see mdt_save_lock
2530 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2532 return lock->l_ast_data != NULL;
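/*
 * Together these two helpers implement a one-bit flag stored directly
 * in l_ast_data: mdt_blocking_ast() below sets it when a commit-on-
 * sharing conflict is detected, and mdt_save_lock() checks it at
 * unlock time to decide whether an asynchronous commit must be
 * started.
 */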
2536 * Blocking AST for mdt locks.
2538 * Starts a transaction commit in case of a COS lock conflict, or
2539 * defers such a commit to mdt_save_lock().
2541 * \param lock the lock which blocks a request or cancelling lock
2542 * \param desc unused
2543 * \param data unused
2544 * \param flag indicates whether this is a cancelling or blocking callback
2546 * \see ldlm_blocking_ast_nocheck
2548 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2549 void *data, int flag)
2551 struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2552 struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2556 if (flag == LDLM_CB_CANCELING)
2558 lock_res_and_lock(lock);
2559 if (lock->l_blocking_ast != mdt_blocking_ast) {
2560 unlock_res_and_lock(lock);
2563 if (mdt_cos_is_enabled(mdt) &&
2564 lock->l_req_mode & (LCK_PW | LCK_EX) &&
2565 lock->l_blocking_lock != NULL &&
2566 lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2567 mdt_set_lock_sync(lock);
2569 rc = ldlm_blocking_ast_nocheck(lock);
2571 /* There is no lock conflict if l_blocking_lock == NULL,
2572 * it indicates a blocking ast sent from ldlm_lock_decref_internal
2573 * when the last reference to a local lock was released */
2574 if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2577 rc = lu_env_init(&env, LCT_LOCAL);
2578 if (unlikely(rc != 0))
2579 CWARN("lu_env initialization failed with rc = %d, "
2580 "cannot start asynchronous commit\n", rc);
2582 mdt_device_commit_async(&env, mdt);
2588 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2589 struct mdt_lock_handle *lh, __u64 ibits, int locality)
2591 struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2592 ldlm_policy_data_t *policy = &info->mti_policy;
2593 struct ldlm_res_id *res_id = &info->mti_res_id;
2597 LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2598 LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2599 LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2600 LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2602 if (mdt_object_exists(o) < 0) {
2603 if (locality == MDT_CROSS_LOCK) {
2604 /* cross-ref object fix */
2605 ibits &= ~MDS_INODELOCK_UPDATE;
2606 ibits |= MDS_INODELOCK_LOOKUP;
2608 LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2609 LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2611 /* No PDO lock on remote object */
2612 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2615 if (lh->mlh_type == MDT_PDO_LOCK) {
2616 /* check for existence after the object is locked */
2617 if (mdt_object_exists(o) == 0) {
2618 /* Non-existent object shouldn't have PDO lock */
2621 /* Non-dir object shouldn't have PDO lock */
2622 if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2627 memset(policy, 0, sizeof(*policy));
2628 fid_build_reg_res_name(mdt_object_fid(o), res_id);
2631 * Take a PDO lock on the whole directory and build the correct @res_id
2632 * for a lock on part of the directory.
2634 if (lh->mlh_pdo_hash != 0) {
2635 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2636 mdt_lock_pdo_mode(info, o, lh);
2637 if (lh->mlh_pdo_mode != LCK_NL) {
2639 * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2640 * is never going to be sent to client and we do not
2641 * want it slowed down due to possible cancels.
2643 policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2644 rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2645 policy, res_id, LDLM_FL_ATOMIC_CB,
2646 &info->mti_exp->exp_handle.h_cookie);
2652 * Finish res_id initialization with the name hash marking the part of
2653 * the directory which is being modified.
2655 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2658 policy->l_inodebits.bits = ibits;
2661 * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2662 * going to be sent to the client. If it is, the mdt_intent_policy() path
2663 * will fix it up and turn the FL_LOCAL flag off.
2665 rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2666 res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2667 &info->mti_exp->exp_handle.h_cookie);
2669 mdt_object_unlock(info, o, lh, 1);
2670 else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2671 lh->mlh_pdo_hash != 0 &&
2672 (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2673 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2680 * Save a lock within request object.
2682 * Keep the lock referenced until either client ACK or transaction
2683 * commit happens, or release the lock immediately, depending on input
2684 * parameters. If COS is enabled, a write lock is converted to a COS lock
2687 * \param info thread info object
2688 * \param h lock handle
2689 * \param mode lock mode
2690 * \param decref force immediate release of the lock
2693 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2694 ldlm_mode_t mode, int decref)
2698 if (lustre_handle_is_used(h)) {
2699 if (decref || !info->mti_has_trans ||
2700 !(mode & (LCK_PW | LCK_EX))){
2701 mdt_fid_unlock(h, mode);
2703 struct mdt_device *mdt = info->mti_mdt;
2704 struct ldlm_lock *lock = ldlm_handle2lock(h);
2705 struct ptlrpc_request *req = mdt_info_req(info);
2708 LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2710 CDEBUG(D_HA, "request = %p reply state = %p"
2711 " transno = "LPD64"\n",
2712 req, req->rq_reply_state, req->rq_transno);
2713 if (mdt_cos_is_enabled(mdt)) {
2715 ldlm_lock_downgrade(lock, LCK_COS);
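/*
 * Commit-on-sharing: instead of dropping the PW/EX lock right
 * away, downgrade it to COS mode. COS locks are mutually
 * compatible, so dependent operations may proceed, while a
 * conflicting request will hit mdt_blocking_ast() above and
 * trigger the transaction commit.
 */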
2718 ptlrpc_save_lock(req, h, mode, no_ack);
2719 if (mdt_is_lock_sync(lock)) {
2720 CDEBUG(D_HA, "found sync-lock,"
2721 " async commit started\n");
2722 mdt_device_commit_async(info->mti_env,
2725 LDLM_LOCK_PUT(lock);
2734 * Unlock mdt object.
2736 * Immediately release the regular lock and the PDO lock, or save the
2737 * locks in the request and keep them referenced until client ACK or
2738 * transaction commit.
2740 * \param info thread info object
2741 * \param o mdt object
2742 * \param lh mdt lock handle referencing regular and PDO locks
2743 * \param decref force immediate release of the locks
2745 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2746 struct mdt_lock_handle *lh, int decref)
2750 mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2751 mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2756 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2757 const struct lu_fid *f,
2758 struct mdt_lock_handle *lh,
2761 struct mdt_object *o;
2763 o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2767 rc = mdt_object_lock(info, o, lh, ibits,
2770 mdt_object_put(info->mti_env, o);
2777 void mdt_object_unlock_put(struct mdt_thread_info *info,
2778 struct mdt_object *o,
2779 struct mdt_lock_handle *lh,
2782 mdt_object_unlock(info, o, lh, decref);
2783 mdt_object_put(info->mti_env, o);
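/*
 * A minimal usage sketch of the find/lock/unlock helpers above
 * (hypothetical caller, assuming the MDT_LH_PARENT slot and LCK_PW
 * mode; not one of the real call paths in this file):
 */
#if 0
static int example_modify(struct mdt_thread_info *info,
			  const struct lu_fid *fid)
{
	struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
	struct mdt_object *o;

	mdt_lock_reg_init(lh, LCK_PW);
	o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
	if (IS_ERR(o))
		return PTR_ERR(o);
	/* ... modify the object under the PW lock ... */
	/* decref == 0: mdt_save_lock() may keep the lock until commit/ACK */
	mdt_object_unlock_put(info, o, lh, 0);
	return 0;
}
#endif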
2786 static struct mdt_handler *mdt_handler_find(__u32 opc,
2787 struct mdt_opc_slice *supported)
2789 struct mdt_opc_slice *s;
2790 struct mdt_handler *h;
2793 for (s = supported; s->mos_hs != NULL; s++) {
2794 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2795 h = s->mos_hs + (opc - s->mos_opc_start);
2796 if (likely(h->mh_opc != 0))
2797 LASSERTF(h->mh_opc == opc,
2798 "opcode mismatch %d != %d\n",
2801 h = NULL; /* unsupported opc */
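/*
 * Lookup example: an opcode opc falling into the half-open range
 * [mos_opc_start, mos_opc_end) selects the handler at offset
 * (opc - mos_opc_start) in that slice's mos_hs array. Holes in a
 * slice have mh_opc == 0 and are reported as unsupported.
 */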
2808 static int mdt_lock_resname_compat(struct mdt_device *m,
2809 struct ldlm_request *req)
2811 /* XXX something... later. */
2815 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2817 /* XXX something... later. */
2822 * Generic code handling requests that have struct mdt_body passed in:
2824 * - extract mdt_body from request and save it in @info, if present;
2826 * - create lu_object, corresponding to the fid in mdt_body, and save it in
2829 * - if the HABEO_CORPUS flag is set for this request type, check whether
2830 * the object actually exists on storage (lu_object_exists()).
2833 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2835 const struct mdt_body *body;
2836 struct mdt_object *obj;
2837 const struct lu_env *env;
2838 struct req_capsule *pill;
2842 env = info->mti_env;
2843 pill = info->mti_pill;
2845 body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2849 if (!(body->valid & OBD_MD_FLID))
2852 if (!fid_is_sane(&body->fid1)) {
2853 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2858 * Do not get size or any capa fields before we check that request
2859 * contains capa actually. There are some requests which do not, for
2860 * instance MDS_IS_SUBDIR.
2862 if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2863 req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2864 mdt_set_capainfo(info, 0, &body->fid1,
2865 req_capsule_client_get(pill, &RMF_CAPA1));
2867 obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2869 if ((flags & HABEO_CORPUS) &&
2870 !mdt_object_exists(obj)) {
2871 mdt_object_put(env, obj);
2872 /* for capability renew ENOENT will be handled in
2874 if (body->valid & OBD_MD_FLOSSCAPA)
2879 info->mti_object = obj;
2888 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2890 struct req_capsule *pill = info->mti_pill;
2894 if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2895 rc = mdt_body_unpack(info, flags);
2899 if (rc == 0 && (flags & HABEO_REFERO)) {
2901 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2902 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2903 info->mti_body->eadatasize);
2904 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2905 req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2908 rc = req_capsule_server_pack(pill);
2913 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2915 struct md_device *next = m->mdt_child;
2917 return next->md_ops->mdo_init_capa_ctxt(env, next,
2918 m->mdt_opts.mo_mds_capa,
2919 m->mdt_capa_timeout,
2925 * Invoke handler for this request opc. Also do necessary preprocessing
2926 * (according to handler ->mh_flags), and post-processing (setting of
2927 * ->last_{xid,committed}).
2929 static int mdt_req_handle(struct mdt_thread_info *info,
2930 struct mdt_handler *h, struct ptlrpc_request *req)
2932 int rc, serious = 0;
2937 LASSERT(h->mh_act != NULL);
2938 LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2939 LASSERT(current->journal_info == NULL);
2942 * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2943 * to put the same checks into handlers like mdt_close(), mdt_reint(),
2944 * etc., without talking to mdt authors first. Checking the same thing
2945 * there again is useless, and returning a 0 error without packing the
2946 * reply is buggy! Handlers either pack a reply or return an error.
2948 * We return 0 here and do not send any reply in order to emulate a
2949 * network failure. Do not send any reply in case any NET-related
2950 * fail_id has occurred.
2952 if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2956 flags = h->mh_flags;
2957 LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2959 if (h->mh_fmt != NULL) {
2960 req_capsule_set(info->mti_pill, h->mh_fmt);
2961 rc = mdt_unpack_req_pack_rep(info, flags);
2964 if (rc == 0 && flags & MUTABOR &&
2965 req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2966 /* should it be rq_status? */
2969 if (rc == 0 && flags & HABEO_CLAVIS) {
2970 struct ldlm_request *dlm_req;
2972 LASSERT(h->mh_fmt != NULL);
2974 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2975 if (dlm_req != NULL) {
2976 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2978 dlm_req->lock_desc.l_policy_data.\
2979 l_inodebits.bits == 0)) {
2981 * A lock without inodebits makes no sense and
2982 * will oops later in ldlm. If the client fails to
2983 * set such bits, do not trigger an assertion.
2985 * For the liblustre flock case, it may be zero.
2989 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2990 rc = mdt_lock_resname_compat(
2993 info->mti_dlm_req = dlm_req;
3000 /* capability setting changed via /proc, needs reinitialize ctxt */
3001 if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
3002 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
3003 info->mti_mdt->mdt_capa_conf = 0;
3006 if (likely(rc == 0)) {
3008 * Process the request; there can be two types of rc:
3009 * 1) errors with msg unpack/pack, or other failures outside the
3010 * operation itself: these are counted as serious errors;
3011 * 2) errors during the fs operation, which should be placed in rq_status
3014 rc = h->mh_act(info);
3016 !req->rq_no_reply && req->rq_reply_state == NULL) {
3017 DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
3018 "pack reply and returned 0 error\n",
3022 serious = is_serious(rc);
3023 rc = clear_serious(rc);
3027 req->rq_status = rc;
3030 * ELDLM_* codes which are > 0 should go only in rq_status, as should
3031 * all non-serious errors.
3033 if (rc > 0 || !serious)
3036 LASSERT(current->journal_info == NULL);
3038 if (rc == 0 && (flags & HABEO_CLAVIS) &&
3039 info->mti_mdt->mdt_opts.mo_compat_resname) {
3040 struct ldlm_reply *dlmrep;
3042 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3044 rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3047 /* If we're DISCONNECTing, the mdt_export_data is already freed */
3048 if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3049 target_committed_to_req(req);
3051 if (unlikely(req_is_replay(req) &&
3052 lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3053 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3057 target_send_reply(req, rc, info->mti_fail_id);
3061 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3063 lh->mlh_type = MDT_NUL_LOCK;
3064 lh->mlh_reg_lh.cookie = 0ull;
3065 lh->mlh_reg_mode = LCK_MINMODE;
3066 lh->mlh_pdo_lh.cookie = 0ull;
3067 lh->mlh_pdo_mode = LCK_MINMODE;
3070 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3072 LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3073 LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3077 * Initialize fields of struct mdt_thread_info. Other fields are left in
3078 * uninitialized state, because it's too expensive to zero out the whole
3079 * mdt_thread_info (> 1K) on each request arrival.
3081 static void mdt_thread_info_init(struct ptlrpc_request *req,
3082 struct mdt_thread_info *info)
3085 struct md_capainfo *ci;
3087 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3088 info->mti_pill = &req->rq_pill;
3091 for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3092 mdt_lock_handle_init(&info->mti_lh[i]);
3094 /* mdt device: it can be NULL while CONNECT */
3095 if (req->rq_export) {
3096 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3097 info->mti_exp = req->rq_export;
3099 info->mti_mdt = NULL;
3100 info->mti_env = req->rq_svc_thread->t_env;
3101 ci = md_capainfo(info->mti_env);
3102 memset(ci, 0, sizeof *ci);
3103 if (req->rq_export) {
3104 if (exp_connect_rmtclient(req->rq_export))
3105 ci->mc_auth = LC_ID_CONVERT;
3106 else if (req->rq_export->exp_connect_flags &
3107 OBD_CONNECT_MDS_CAPA)
3108 ci->mc_auth = LC_ID_PLAIN;
3110 ci->mc_auth = LC_ID_NONE;
3113 info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3114 info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3115 info->mti_mos = NULL;
3117 memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3118 info->mti_body = NULL;
3119 info->mti_object = NULL;
3120 info->mti_dlm_req = NULL;
3121 info->mti_has_trans = 0;
3122 info->mti_cross_ref = 0;
3123 info->mti_opdata = 0;
3124 info->mti_big_lmm_used = 0;
3126 /* Do not check for split by default. */
3127 info->mti_spec.sp_ck_split = 0;
3128 info->mti_spec.no_create = 0;
3131 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3135 req_capsule_fini(info->mti_pill);
3136 if (info->mti_object != NULL) {
3137 mdt_object_put(info->mti_env, info->mti_object);
3138 info->mti_object = NULL;
3140 for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3141 mdt_lock_handle_fini(&info->mti_lh[i]);
3142 info->mti_env = NULL;
3145 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3146 struct obd_device *obd, int *process)
3148 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3149 case MDS_CONNECT: /* This will never get here, but for completeness. */
3150 case OST_CONNECT: /* This will never get here, but for completeness. */
3151 case MDS_DISCONNECT:
3152 case OST_DISCONNECT:
3157 case MDS_DONE_WRITING:
3158 case MDS_SYNC: /* used in unmounting */
3164 *process = target_queue_recovery_request(req, obd);
3168 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3175 * Handle recovery. Return:
3176 * +1: continue request processing;
3177 * -ve: abort immediately with the given error code;
3178 * 0: send reply with error code in req->rq_status;
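/* How the dispatcher is expected to consume the result (hypothetical
 * dispatcher fragment, not the real dispatch loop): */
#if 0
rc = mdt_recovery(info);
if (rc < 0)
	RETURN(rc);	/* abort immediately with this error */
if (rc == 0)
	RETURN(rc);	/* reply carrying req->rq_status was already sent */
/* rc == +1: fall through to normal request processing */
#endif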
3180 static int mdt_recovery(struct mdt_thread_info *info)
3182 struct ptlrpc_request *req = mdt_info_req(info);
3183 struct obd_device *obd;
3187 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3190 case SEC_CTX_INIT_CONT:
3196 rc = mdt_handle_idmap(info);
3205 if (unlikely(!class_connected_export(req->rq_export))) {
3206 CERROR("operation %d on unconnected MDS from %s\n",
3207 lustre_msg_get_opc(req->rq_reqmsg),
3208 libcfs_id2str(req->rq_peer));
3209 /* FIXME: For CMD cleanup, when mds_B stops, the req from
3210 * mds_A will get -ENOTCONN (especially for ping reqs),
3211 * which will trigger mds_A's deactivate timeout; then, when
3212 * mds_A cleans up, the cleanup process will be suspended since the
3213 * deactivate timeout is not zero.
3215 req->rq_status = -ENOTCONN;
3216 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3220 /* sanity check: if the xid matches, the request must be marked as
3221 * resent or replayed */
3222 if (req_xid_is_last(req)) {
3223 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3224 (MSG_RESENT | MSG_REPLAY))) {
3225 DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3226 "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3227 lustre_msg_get_flags(req->rq_reqmsg));
3229 req->rq_status = -ENOTCONN;