/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/mdt/mdt_handler.c
 *
 * Lustre Metadata Target (mdt) request handler
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Mike Shaver <shaver@clusterfs.com>
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Huang Hua <huanghua@clusterfs.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS

#include <linux/module.h>
/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>
/* struct ptlrpc_request */
#include <lustre_net.h>
/* struct obd_export */
#include <lustre_export.h>
/* struct obd_device */
#include <obd.h>
#include <dt_object.h>
#include <lustre_mds.h>
#include <lustre_mdt.h>
#include "mdt_internal.h"
#ifdef HAVE_QUOTA_SUPPORT
# include <lustre_quota.h>
#endif
#include <lustre_acl.h>
#include <lustre_param.h>
#include <lustre_fsfilt.h>
mdl_mode_t mdt_mdl_lock_modes[] = {
        [LCK_MINMODE] = MDL_MINMODE,
        [LCK_EX]      = MDL_EX,
        [LCK_PW]      = MDL_PW,
        [LCK_PR]      = MDL_PR,
        [LCK_CW]      = MDL_CW,
        [LCK_CR]      = MDL_CR,
        [LCK_NL]      = MDL_NL,
        [LCK_GROUP]   = MDL_GROUP
};

ldlm_mode_t mdt_dlm_lock_modes[] = {
        [MDL_MINMODE] = LCK_MINMODE,
        [MDL_EX]      = LCK_EX,
        [MDL_PW]      = LCK_PW,
        [MDL_PR]      = LCK_PR,
        [MDL_CW]      = LCK_CW,
        [MDL_CR]      = LCK_CR,
        [MDL_NL]      = LCK_NL,
        [MDL_GROUP]   = LCK_GROUP
};
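
/*
 * A minimal sketch of how these translation tables are meant to be indexed
 * (illustrative only; the real helpers, mdt_dlm_mode2mdl_mode() and
 * mdt_mdl_mode2dlm_mode(), live in mdt_internal.h).  It assumes lock modes
 * are power-of-two values used directly as array indices, which is why the
 * tables above use designated initializers.
 */
#if 0
static inline mdl_mode_t example_dlm2mdl_mode(ldlm_mode_t mode)
{
        /* e.g. LCK_PR maps to MDL_PR */
        return mdt_mdl_lock_modes[mode];
}

static inline ldlm_mode_t example_mdl2dlm_mode(mdl_mode_t mode)
{
        /* e.g. MDL_CW maps back to LCK_CW */
        return mdt_dlm_lock_modes[mode];
}
#endif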
/*
 * Initialized in mdt_mod_init().
 */
static unsigned long mdt_num_threads;
static unsigned long mdt_min_threads;
static unsigned long mdt_max_threads;
/* ptlrpc request handler for MDT. All handlers are
 * grouped into several slices - struct mdt_opc_slice,
 * and stored in an array - mdt_handlers[].
 */
struct mdt_handler {
        /* The name of this handler. */
        const char *mh_name;
        /* Fail id for this handler, checked at the beginning of this handler. */
        int         mh_fail_id;
        /* Operation code for this handler */
        __u32       mh_opc;
        /* Flags are listed in enum mdt_handler_flags below. */
        __u32       mh_flags;
        /* The actual handler function to execute. */
        int (*mh_act)(struct mdt_thread_info *info);
        /* Request format for this request. */
        const struct req_format *mh_fmt;
};
enum mdt_handler_flags {
        /*
         * struct mdt_body is passed in the incoming message, and object
         * identified by this fid exists on disk.
         *
         * "habeo corpus" == "I have a body"
         */
        HABEO_CORPUS = (1 << 0),
        /*
         * struct ldlm_request is passed in the incoming message.
         *
         * "habeo clavis" == "I have a key"
         */
        HABEO_CLAVIS = (1 << 1),
        /*
         * this request has fixed reply format, so that reply message can be
         * packed by generic code.
         *
         * "habeo refero" == "I have a reply"
         */
        HABEO_REFERO = (1 << 2),
        /*
         * this request will modify something, so check whether the filesystem
         * is readonly or not, then return -EROFS to client asap if necessary.
         *
         * "mutabor" == "I shall modify"
         */
        MUTABOR      = (1 << 3)
};
struct mdt_opc_slice {
        __u32               mos_opc_start;
        int                 mos_opc_end;
        struct mdt_handler *mos_hs;
};

static struct mdt_opc_slice mdt_regular_handlers[];
static struct mdt_opc_slice mdt_readpage_handlers[];
static struct mdt_opc_slice mdt_xmds_handlers[];
static struct mdt_opc_slice mdt_seq_handlers[];
static struct mdt_opc_slice mdt_fld_handlers[];

static struct mdt_device *mdt_dev(struct lu_device *d);
static int mdt_regular_handle(struct ptlrpc_request *req);
static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
                        struct getinfo_fid2path *fp);

static const struct lu_object_operations mdt_obj_ops;
int mdt_get_disposition(struct ldlm_reply *rep, int flag)
{
        if (rep == NULL)
                return 0;
        return (rep->lock_policy_res1 & flag);
}

void mdt_clear_disposition(struct mdt_thread_info *info,
                           struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata &= ~flag;
        if (rep)
                rep->lock_policy_res1 &= ~flag;
}

void mdt_set_disposition(struct mdt_thread_info *info,
                         struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata |= flag;
        if (rep)
                rep->lock_policy_res1 |= flag;
}
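
/*
 * Illustrative sketch: these helpers maintain the DISP_* disposition bits
 * both in the request-local mti_opdata and in the ldlm intent reply.  A
 * typical lookup path records execution and then the positive or negative
 * outcome (compare mdt_raw_lookup() below).
 */
#if 0
mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
if (rc == -ENOENT)
        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
else if (rc == 0)
        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
#endif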
void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
{
        lh->mlh_pdo_hash = 0;
        lh->mlh_reg_mode = lm;
        lh->mlh_type = MDT_REG_LOCK;
}

void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
                       const char *name, int namelen)
{
        lh->mlh_reg_mode = lm;
        lh->mlh_type = MDT_PDO_LOCK;

        if (name != NULL) {
                LASSERT(namelen > 0);
                lh->mlh_pdo_hash = full_name_hash(name, namelen);
        } else {
                LASSERT(namelen == 0);
                lh->mlh_pdo_hash = 0ull;
        }
}
static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
                              struct mdt_lock_handle *lh)
{
        mdl_mode_t mode;
        ENTRY;

        /*
         * Any dir access needs a couple of locks:
         *
         * 1) on the part of the dir we are going to look up or modify;
         *
         * 2) on the whole dir to protect it from concurrent splitting
         * and/or to flush the client's cache for readdir().
         *
         * So, for a given mode and object this routine decides what lock
         * mode to use for lock #2:
         *
         * 1) if the caller is going to look up in the dir, then we only need
         * to protect the dir from being split - LCK_CR;
         *
         * 2) if the caller is going to modify the dir, then we need to
         * protect the dir from being split and to flush the cache - LCK_CW;
         *
         * 3) if the caller is going to modify the dir and that dir seems
         * ready for splitting, then we need to protect it from any type of
         * access (lookup/modify/split) - LCK_EX. --bzzz
         */

        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);

        /*
         * Ask the underlying level its opinion about the preferable PDO lock
         * mode, having the access type passed as a regular lock mode:
         *
         * - MDL_MINMODE means that the lower layer does not want to specify
         * a lock mode;
         *
         * - MDL_NL means that no PDO lock should be taken. This is used in
         * some cases; say, for non-splittable directories there is no need
         * to use PDO locks at all.
         */
        mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
                             mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));

        if (mode != MDL_MINMODE) {
                lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
        } else {
                /*
                 * The lower layer does not want to specify a locking mode.
                 * We do it ourselves. No special protection is needed, just
                 * flush the client's cache on modification and allow
                 * concurrent lookups.
                 */
                switch (lh->mlh_reg_mode) {
                case LCK_EX:
                        lh->mlh_pdo_mode = LCK_EX;
                        break;
                case LCK_PR:
                        lh->mlh_pdo_mode = LCK_CR;
                        break;
                case LCK_PW:
                        lh->mlh_pdo_mode = LCK_CW;
                        break;
                default:
                        CERROR("Not expected lock type (0x%x)\n",
                               (int)lh->mlh_reg_mode);
                        LBUG();
                }
        }

        LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
        EXIT;
}
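
/*
 * A short usage sketch (illustrative only, under the assumptions spelled
 * out in the comments above): a caller modifying an entry in directory "o"
 * takes both locks through a single mdt_lock_handle - lock #2 on the whole
 * directory in the PDO mode chosen above, plus lock #1 on the hashed part
 * of the directory covering the name being changed.
 */
#if 0
{
        struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];

        mdt_lock_pdo_init(lh, LCK_PW, name, namelen);
        rc = mdt_object_lock(info, o, lh, MDS_INODELOCK_UPDATE,
                             MDT_LOCAL_LOCK);
        if (rc == 0) {
                /* ... modify the directory entry ... */
                mdt_object_unlock(info, o, lh, 0); /* save until commit/ACK */
        }
}
#endif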
static int mdt_getstatus(struct mdt_thread_info *info)
{
        struct mdt_device *mdt  = info->mti_mdt;
        struct md_device  *next = mdt->mdt_child;
        struct mdt_body   *repbody;
        int                rc;

        ENTRY;

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
        if (rc != 0)
                RETURN(rc);

        repbody->valid |= OBD_MD_FLID;

        if (mdt->mdt_opts.mo_mds_capa &&
            info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
                struct mdt_object  *root;
                struct lustre_capa *capa;

                root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
                                 0);
                mdt_object_put(info->mti_env, root);
                if (rc == 0)
                        repbody->valid |= OBD_MD_FLMDSCAPA;
        }

        RETURN(rc);
}
static int mdt_statfs(struct mdt_thread_info *info)
{
        struct md_device      *next = info->mti_mdt->mdt_child;
        struct ptlrpc_service *svc;
        struct obd_statfs     *osfs;
        int                    rc;

        ENTRY;

        svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;

        /* This will trigger a watchdog timeout */
        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
                         (MDT_SERVICE_WATCHDOG_FACTOR *
                          at_get(&svc->srv_at_estimate)) + 1);

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
                rc = err_serious(-ENOMEM);
        } else {
                osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
                rc = next->md_ops->mdo_statfs(info->mti_env, next,
                                              &info->mti_u.ksfs);
                statfs_pack(osfs, &info->mti_u.ksfs);
        }
        RETURN(rc);
}
/**
 * Pack SOM attributes into the reply.
 * Call under a DLM UPDATE lock.
 */
static void mdt_pack_size2body(struct mdt_thread_info *info,
                               struct mdt_object *mo)
{
        struct mdt_body *b;
        struct md_attr  *ma = &info->mti_attr;

        LASSERT(ma->ma_attr.la_valid & LA_MODE);
        b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        /* Check if Size-on-MDS is supported, if this is a regular file,
         * if SOM is enabled on the object and if the SOM cache exists and
         * is valid. Otherwise do not pack Size-on-MDS attributes to the
         * reply. */
        if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
            !S_ISREG(ma->ma_attr.la_mode) ||
            !mdt_object_is_som_enabled(mo) ||
            !(ma->ma_valid & MA_SOM))
                return;

        b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        b->size = ma->ma_som->msd_size;
        b->blocks = ma->ma_som->msd_blocks;
}
void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
                        const struct lu_attr *attr, const struct lu_fid *fid)
{
        /* XXX should pack the reply body according to lu_valid */
        b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
                    OBD_MD_FLGID   | OBD_MD_FLTYPE  |
                    OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
                    OBD_MD_FLATIME | OBD_MD_FLMTIME;

        if (!S_ISREG(attr->la_mode))
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;

        b->atime  = attr->la_atime;
        b->mtime  = attr->la_mtime;
        b->ctime  = attr->la_ctime;
        b->mode   = attr->la_mode;
        b->size   = attr->la_size;
        b->blocks = attr->la_blocks;
        b->uid    = attr->la_uid;
        b->gid    = attr->la_gid;
        b->flags  = attr->la_flags;
        b->nlink  = attr->la_nlink;
        b->rdev   = attr->la_rdev;

        if (fid) {
                b->fid1 = *fid;
                b->valid |= OBD_MD_FLID;

                /* FIXME: these should be fixed when new igif ready. */
                b->ino  = fid_oid(fid);       /* 1.6 compatibility */
                b->generation = fid_ver(fid); /* 1.6 compatibility */
                b->valid |= OBD_MD_FLGENER;   /* 1.6 compatibility */

                CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
                       PFID(fid), b->nlink, b->mode, b->size);
        }

        if (info)
                mdt_body_reverse_idmap(info, b);
}
static inline int mdt_body_has_lov(const struct lu_attr *la,
                                   const struct mdt_body *body)
{
        return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
                (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA)));
}
static int mdt_getattr_internal(struct mdt_thread_info *info,
                                struct mdt_object *o, int ma_need)
{
        struct md_object        *next = mdt_object_child(o);
        const struct mdt_body   *reqbody = info->mti_body;
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct md_attr          *ma = &info->mti_attr;
        struct lu_attr          *la = &ma->ma_attr;
        struct req_capsule      *pill = info->mti_pill;
        const struct lu_env     *env = info->mti_env;
        struct mdt_body         *repbody;
        struct lu_buf           *buffer = &info->mti_buf;
        int                      rc;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        ma->ma_valid = 0;

        rc = mdt_object_exists(o);
        if (rc < 0) {
                /* This object is located on remote node. */
                repbody->fid1 = *mdt_object_fid(o);
                repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
                RETURN(0);
        }

        buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
        buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);

        /* If it is a dir object and the client requires MEA, then we return MEA. */
        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_MEA) {
                /* Assumption: MDT_MD size is enough for lmv size. */
                ma->ma_lmv = buffer->lb_buf;
                ma->ma_lmv_size = buffer->lb_len;
                ma->ma_need = MA_LMV | MA_INODE;
        } else {
                ma->ma_lmm = buffer->lb_buf;
                ma->ma_lmm_size = buffer->lb_len;
                ma->ma_need = MA_LOV | MA_INODE;
        }

        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_FLDIREA &&
            lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
                /* get default stripe info for this dir. */
                ma->ma_need |= MA_LOV_DEF;
        }
        ma->ma_need |= ma_need;
        if (ma->ma_need & MA_SOM)
                ma->ma_som = &info->mti_u.som.data;

        rc = mo_attr_get(env, next, ma);
        if (unlikely(rc)) {
                CERROR("getattr error for "DFID": %d\n",
                       PFID(mdt_object_fid(o)), rc);
                RETURN(rc);
        }

        if (likely(ma->ma_valid & MA_INODE))
                mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
        else
                RETURN(-EFAULT);

        if (mdt_body_has_lov(la, reqbody)) {
                if (ma->ma_valid & MA_LOV) {
                        LASSERT(ma->ma_lmm_size);
                        mdt_dump_lmm(D_INFO, ma->ma_lmm);
                        repbody->eadatasize = ma->ma_lmm_size;
                        if (S_ISDIR(la->la_mode))
                                repbody->valid |= OBD_MD_FLDIREA;
                        else
                                repbody->valid |= OBD_MD_FLEASIZE;
                }
                if (ma->ma_valid & MA_LMV) {
                        LASSERT(S_ISDIR(la->la_mode));
                        repbody->eadatasize = ma->ma_lmv_size;
                        repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
                }
                if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
                        repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
                }
        } else if (S_ISLNK(la->la_mode) &&
                   reqbody->valid & OBD_MD_LINKNAME) {
                buffer->lb_buf = ma->ma_lmm;
                buffer->lb_len = reqbody->eadatasize;
                rc = mo_readlink(env, next, buffer);
                if (unlikely(rc <= 0)) {
                        CERROR("readlink failed: %d\n", rc);
                        rc = -EFAULT;
                } else {
                        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
                                rc -= 2;
                        repbody->valid |= OBD_MD_LINKNAME;
                        repbody->eadatasize = rc;
                        /* NULL terminate */
                        ((char *)ma->ma_lmm)[rc - 1] = 0;
                        CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
                               (char *)ma->ma_lmm, rc);
                        rc = 0;
                }
        }

        if (reqbody->valid & OBD_MD_FLMODEASIZE) {
                repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
                repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
                repbody->valid |= OBD_MD_FLMODEASIZE;
                CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
                       "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
                       repbody->max_cookiesize);
        }

        if (exp_connect_rmtclient(info->mti_exp) &&
            reqbody->valid & OBD_MD_FLRMTPERM) {
                void *buf = req_capsule_server_get(pill, &RMF_ACL);

                /* mdt_getattr_lock only */
                rc = mdt_pack_remote_perm(info, o, buf);
                if (rc) {
                        repbody->valid &= ~OBD_MD_FLRMTPERM;
                        repbody->aclsize = 0;
                        RETURN(rc);
                } else {
                        repbody->valid |= OBD_MD_FLRMTPERM;
                        repbody->aclsize = sizeof(struct mdt_remote_perm);
                }
        }
#ifdef CONFIG_FS_POSIX_ACL
        else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
                 (reqbody->valid & OBD_MD_FLACL)) {
                buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
                buffer->lb_len = req_capsule_get_size(pill,
                                                      &RMF_ACL, RCL_SERVER);
                if (buffer->lb_len > 0) {
                        rc = mo_xattr_get(env, next, buffer,
                                          XATTR_NAME_ACL_ACCESS);
                        if (rc < 0) {
                                if (rc == -ENODATA) {
                                        repbody->aclsize = 0;
                                        repbody->valid |= OBD_MD_FLACL;
                                        rc = 0;
                                } else if (rc == -EOPNOTSUPP) {
                                        rc = 0;
                                } else {
                                        CERROR("got acl size: %d\n", rc);
                                }
                        } else {
                                repbody->aclsize = rc;
                                repbody->valid |= OBD_MD_FLACL;
                                rc = 0;
                        }
                }
        }
#endif

        if (reqbody->valid & OBD_MD_FLMDSCAPA &&
            info->mti_mdt->mdt_opts.mo_mds_capa &&
            info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
                struct lustre_capa *capa;

                capa = req_capsule_server_get(pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(env, next, capa, 0);
                if (rc)
                        RETURN(rc);
                repbody->valid |= OBD_MD_FLMDSCAPA;
        }
        RETURN(rc);
}
static int mdt_renew_capa(struct mdt_thread_info *info)
{
        struct mdt_object  *obj = info->mti_object;
        struct mdt_body    *body;
        struct lustre_capa *capa, *c;
        int rc;
        ENTRY;

        /* if object doesn't exist, or server has disabled capability,
         * return directly, client will find body->valid OBD_MD_FLOSSCAPA
         * flag not set, and know that the capability cannot be renewed. */
        if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
            !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
                RETURN(0);

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(body != NULL);

        c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
        LASSERT(c);

        capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
        LASSERT(capa);

        *capa = *c;
        rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
        if (rc == 0)
                body->valid |= OBD_MD_FLOSSCAPA;
        RETURN(rc);
}
static int mdt_getattr(struct mdt_thread_info *info)
{
        struct mdt_object  *obj = info->mti_object;
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body    *reqbody;
        struct mdt_body    *repbody;
        mode_t              mode;
        int                 md_size;
        int                 rc;
        ENTRY;

        reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
        LASSERT(reqbody);

        if (reqbody->valid & OBD_MD_FLOSSCAPA) {
                rc = req_capsule_server_pack(pill);
                if (unlikely(rc))
                        RETURN(err_serious(rc));
                rc = mdt_renew_capa(info);
                GOTO(out_shrink, rc);
        }

        LASSERT(obj != NULL);
        LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));

        mode = lu_object_attr(&obj->mot_obj.mo_lu);
        if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
            (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
                md_size = reqbody->eadatasize;
        else
                md_size = info->mti_mdt->mdt_max_mdsize;

        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);

        rc = req_capsule_server_pack(pill);
        if (unlikely(rc != 0))
                RETURN(err_serious(rc));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        if (reqbody->valid & OBD_MD_FLRMTPERM)
                rc = mdt_init_ucred(info, reqbody);
        else
                rc = mdt_check_ucred(info);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);

        /*
         * Don't check capability at all, because rename might getattr for
         * remote obj, and at that time no capability is available.
         */
        mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, obj, 0);
        if (reqbody->valid & OBD_MD_FLRMTPERM)
                mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_shrink_reply(info);
        return rc;
}
static int mdt_is_subdir(struct mdt_thread_info *info)
{
        struct mdt_object     *o = info->mti_object;
        struct req_capsule    *pill = info->mti_pill;
        const struct mdt_body *body = info->mti_body;
        struct mdt_body       *repbody;
        int rc;
        ENTRY;

        LASSERT(o != NULL);

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        /*
         * We save the last checked parent fid to @repbody->fid1 for the
         * remote directory case.
         */
        LASSERT(fid_is_sane(&body->fid2));
        LASSERT(mdt_object_exists(o) > 0);
        rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
                           &body->fid2, &repbody->fid1);
        if (rc == 0 || rc == -EREMOTE)
                repbody->valid |= OBD_MD_FLID;

        RETURN(rc);
}
static int mdt_raw_lookup(struct mdt_thread_info *info,
                          struct mdt_object *parent,
                          const struct lu_name *lname,
                          struct ldlm_reply *ldlm_rep)
{
        struct md_object *next = mdt_object_child(info->mti_object);
        const struct mdt_body *reqbody = info->mti_body;
        struct lu_fid *child_fid = &info->mti_tmp_fid1;
        struct mdt_body *repbody;
        int rc;
        ENTRY;

        if (reqbody->valid != OBD_MD_FLID)
                RETURN(0);

        LASSERT(!info->mti_cross_ref);

        /* Only got the fid of this obj by name */
        rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                        &info->mti_spec);
#if 0
        /* XXX is raw_lookup possible as intent operation? */
        if (rc != 0) {
                if (rc == -ENOENT)
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
                RETURN(rc);
        } else
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
#endif
        if (rc == 0) {
                repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
                repbody->fid1 = *child_fid;
                repbody->valid = OBD_MD_FLID;
        }
        RETURN(1);
}
/*
 * UPDATE lock should be taken against parent, and be released before exit;
 * child_bits lock should be taken against child, and be returned back:
 * (1) normal request should release the child lock;
 * (2) intent request will grant the lock to client.
 */
static int mdt_getattr_name_lock(struct mdt_thread_info *info,
                                 struct mdt_lock_handle *lhc,
                                 __u64 child_bits,
                                 struct ldlm_reply *ldlm_rep)
{
        struct ptlrpc_request  *req = mdt_info_req(info);
        struct mdt_body        *reqbody = NULL;
        struct mdt_object      *parent = info->mti_object;
        struct mdt_object      *child;
        struct md_object       *next = mdt_object_child(parent);
        struct lu_fid          *child_fid = &info->mti_tmp_fid1;
        struct lu_name         *lname = NULL;
        const char             *name = NULL;
        int                     namelen = 0;
        struct mdt_lock_handle *lhp;
        struct ldlm_lock       *lock;
        struct ldlm_res_id     *res_id;
        int                     is_resent;
        int                     ma_need = 0;
        int                     rc;

        ENTRY;

        is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
        LASSERT(ergo(is_resent,
                     lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));

        LASSERT(parent != NULL);
        name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
        if (name == NULL)
                RETURN(err_serious(-EFAULT));

        namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
                                       RCL_CLIENT) - 1;
        if (!info->mti_cross_ref) {
                /*
                 * XXX: Check for "namelen == 0" is for getattr by fid
                 * (OBD_CONNECT_ATTRFID), otherwise do not allow empty name,
                 * that is the name must contain at least one character and
                 * the terminating '\0'.
                 */
                if (namelen == 0) {
                        reqbody = req_capsule_client_get(info->mti_pill,
                                                         &RMF_MDT_BODY);
                        LASSERT(fid_is_sane(&reqbody->fid2));
                        name = NULL;

                        CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
                               "ldlm_rep = %p\n",
                               PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
                               ldlm_rep);
                } else {
                        lname = mdt_name(info->mti_env, (char *)name, namelen);
                        CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
                               "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
                               name, ldlm_rep);
                }
        }
        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
        rc = mdt_object_exists(parent);
        if (unlikely(rc == 0)) {
                LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
                                &parent->mot_obj.mo_lu,
                                "Parent doesn't exist!\n");
                RETURN(-ESTALE);
        } else if (!info->mti_cross_ref) {
                LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
                         PFID(mdt_object_fid(parent)));
        }
        if (lname) {
                rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
                if (rc != 0) {
                        if (rc > 0)
                                rc = 0;
                        RETURN(rc);
                }
        }

        if (info->mti_cross_ref) {
                /* Only getattr on the child. Parent is on another node. */
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
                child = parent;
                CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
                       "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);

                if (is_resent) {
                        /* Do not take lock for resent case. */
                        lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                        LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                                 lhc->mlh_reg_lh.cookie);
                        LASSERT(fid_res_name_eq(mdt_object_fid(child),
                                                &lock->l_resource->lr_name));
                        LDLM_LOCK_PUT(lock);
                        rc = 0;
                } else {
                        mdt_lock_handle_init(lhc);
                        mdt_lock_reg_init(lhc, LCK_PR);

                        /*
                         * Object's name is on another MDS, no lookup lock is
                         * needed here but update is.
                         */
                        child_bits &= ~MDS_INODELOCK_LOOKUP;
                        child_bits |= MDS_INODELOCK_UPDATE;
                        rc = mdt_object_lock(info, child, lhc, child_bits,
                                             MDT_LOCAL_LOCK);
                }
                if (rc == 0) {
                        /* Finally, we can get attr for child. */
                        mdt_set_capainfo(info, 0, mdt_object_fid(child),
                                         BYPASS_CAPA);
                        rc = mdt_getattr_internal(info, child, 0);
                        if (unlikely(rc != 0))
                                mdt_object_unlock(info, child, lhc, 1);
                }
                RETURN(rc);
        }
        /* step 1: lock parent */
        lhp = &info->mti_lh[MDT_LH_PARENT];
        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
        rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
                             MDT_LOCAL_LOCK);
        if (unlikely(rc != 0))
                RETURN(rc);

        if (lname) {
                /* step 2: lookup child's fid by name */
                rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                                &info->mti_spec);
                if (rc != 0) {
                        if (rc == -ENOENT)
                                mdt_set_disposition(info, ldlm_rep,
                                                    DISP_LOOKUP_NEG);
                        GOTO(out_parent, rc);
                } else
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        } else {
                *child_fid = reqbody->fid2;
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        }

        /*
         * step 3: find the child object by fid & lock it,
         * regardless of whether it is local or remote.
         */
        child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);

        if (unlikely(IS_ERR(child)))
                GOTO(out_parent, rc = PTR_ERR(child));
        if (is_resent) {
                /* Do not take lock for resent case. */
                lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                         lhc->mlh_reg_lh.cookie);

                res_id = &lock->l_resource->lr_name;
                if (!fid_res_name_eq(mdt_object_fid(child),
                                     &lock->l_resource->lr_name)) {
                        LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
                                                 &lock->l_resource->lr_name),
                                 "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                                 (unsigned long)res_id->name[0],
                                 (unsigned long)res_id->name[1],
                                 (unsigned long)res_id->name[2],
                                 PFID(mdt_object_fid(parent)));
                        CWARN("Although resent, still cannot get child lock, "
                              "parent: "DFID" child: "DFID"\n",
                              PFID(mdt_object_fid(parent)),
                              PFID(mdt_object_fid(child)));
                        lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
976 ma = &info->mti_attr;
978 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
979 mdt_lock_handle_init(lhc);
980 mdt_lock_reg_init(lhc, LCK_PR);
982 if (mdt_object_exists(child) == 0) {
983 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
984 &child->mot_obj.mo_lu,
985 "Object doesn't exist!\n");
986 GOTO(out_child, rc = -ESTALE);
990 ma->ma_need = MA_INODE;
991 rc = mo_attr_get(info->mti_env, next, ma);
992 if (unlikely(rc != 0))
995 /* If the file has not been changed for some time, we return
996 * not only a LOOKUP lock, but also an UPDATE lock and this
997 * might save us RPC on later STAT. For directories, it also
998 * let negative dentry starts working for this dir. */
999 if (ma->ma_valid & MA_INODE &&
1000 ma->ma_attr.la_valid & LA_CTIME &&
1001 info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1002 ma->ma_attr.la_ctime < cfs_time_current_sec())
1003 child_bits |= MDS_INODELOCK_UPDATE;
1005 rc = mdt_object_lock(info, child, lhc, child_bits,
1008 if (unlikely(rc != 0))
1009 GOTO(out_child, rc);
1012 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1013 /* Get MA_SOM attributes if update lock is given. */
1015 lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1016 S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1019 /* finally, we can get attr for child. */
1020 mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1021 rc = mdt_getattr_internal(info, child, ma_need);
1022 if (unlikely(rc != 0)) {
1023 mdt_object_unlock(info, child, lhc, 1);
1025 /* Debugging code. */
1026 res_id = &lock->l_resource->lr_name;
1027 LDLM_DEBUG(lock, "Returning lock to client");
1028 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1029 &lock->l_resource->lr_name),
1030 "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1031 (unsigned long)res_id->name[0],
1032 (unsigned long)res_id->name[1],
1033 (unsigned long)res_id->name[2],
1034 PFID(mdt_object_fid(child)));
1035 mdt_pack_size2body(info, child);
1038 LDLM_LOCK_PUT(lock);
1042 mdt_object_put(info->mti_env, child);
1044 mdt_object_unlock(info, parent, lhp, 1);
/* normal handler: should release the child lock */
static int mdt_getattr_name(struct mdt_thread_info *info)
{
        struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
        struct mdt_body        *reqbody;
        struct mdt_body        *repbody;
        int rc;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(reqbody != NULL);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        rc = mdt_init_ucred(info, reqbody);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
        if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
                ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
                lhc->mlh_reg_lh.cookie = 0;
        }
        mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_shrink_reply(info);
        return rc;
}
static const struct lu_device_operations mdt_lu_ops;

static int lu_device_is_mdt(struct lu_device *d)
{
        return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
}

static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                         void *karg, void *uarg);
static int mdt_set_info(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        char *key;
        void *val;
        int keylen, vallen, rc = 0;
        ENTRY;

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(rc);

        key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info key");
                RETURN(-EFAULT);
        }

        keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);

        val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info val");
                RETURN(-EFAULT);
        }

        vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
                                      RCL_CLIENT);

        /* Swab any part of val you need to here */
        if (KEY_IS(KEY_READ_ONLY)) {
                req->rq_status = 0;
                lustre_msg_set_status(req->rq_repmsg, 0);

                cfs_spin_lock(&req->rq_export->exp_lock);
                if (*(__u32 *)val)
                        req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
                else
                        req->rq_export->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
                cfs_spin_unlock(&req->rq_export->exp_lock);

        } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
                struct changelog_setinfo *cs =
                        (struct changelog_setinfo *)val;
                if (vallen != sizeof(*cs)) {
                        CERROR("Bad changelog_clear setinfo size %d\n", vallen);
                        RETURN(-EINVAL);
                }
                if (ptlrpc_req_need_swab(req)) {
                        __swab64s(&cs->cs_recno);
                        __swab32s(&cs->cs_id);
                }

                rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
                                   vallen, val, NULL);
                lustre_msg_set_status(req->rq_repmsg, rc);

        } else {
                RETURN(-EINVAL);
        }
        RETURN(0);
}
static int mdt_connect(struct mdt_thread_info *info)
{
        int rc;
        struct ptlrpc_request *req;

        req = mdt_info_req(info);
        rc = target_handle_connect(req);
        if (rc == 0) {
                LASSERT(req->rq_export != NULL);
                info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
                rc = mdt_init_sec_level(info);
                if (rc == 0)
                        rc = mdt_init_idmap(info);
                if (rc != 0)
                        obd_disconnect(class_export_get(req->rq_export));
        } else {
                rc = err_serious(rc);
        }
        return rc;
}

static int mdt_disconnect(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        rc = target_handle_disconnect(mdt_info_req(info));
        if (rc)
                rc = err_serious(rc);
        RETURN(rc);
}
static int mdt_sendpage(struct mdt_thread_info *info,
                        struct lu_rdpg *rdpg)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
        int                      tmpcount;
        int                      tmpsize;
        int                      i;
        int                      rc;
        int                      timeout;
        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
                                    MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(-ENOMEM);

        for (i = 0, tmpcount = rdpg->rp_count;
             i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
                ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
        }

        LASSERT(desc->bd_nob == rdpg->rp_count);
        rc = sptlrpc_svc_wrap_bulk(req, desc);
        if (rc)
                GOTO(free_desc, rc);

        rc = ptlrpc_start_bulk_transfer(desc);
        if (rc)
                GOTO(free_desc, rc);

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                GOTO(abort_bulk, rc = 0);

        timeout = (int)req->rq_deadline - cfs_time_current_sec();
        if (timeout < 0)
                CERROR("Req deadline already passed %lu (now: %lu)\n",
                       req->rq_deadline, cfs_time_current_sec());
        *lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)),
                                    cfs_time_seconds(1), NULL, NULL);
        rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc) ||
                          exp->exp_failed || exp->exp_abort_active_req, lwi);
        LASSERT(rc == 0 || rc == -ETIMEDOUT);

        if (rc == 0) {
                if (desc->bd_success &&
                    desc->bd_nob_transferred == rdpg->rp_count)
                        GOTO(free_desc, rc);

                rc = -ETIMEDOUT;
                if (exp->exp_abort_active_req || exp->exp_failed)
                        GOTO(abort_bulk, rc);
        }

        DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
                  (rc == -ETIMEDOUT) ? "timeout" : "network error",
                  desc->bd_nob_transferred, rdpg->rp_count,
                  exp->exp_client_uuid.uuid,
                  exp->exp_connection->c_remote_uuid.uuid);

        class_fail_export(exp);

        EXIT;
abort_bulk:
        ptlrpc_abort_bulk(desc);
free_desc:
        ptlrpc_free_bulk(desc);
        return rc;
}
#ifdef HAVE_SPLIT_SUPPORT
/*
 * Retrieve dir entries from the page and insert them into the slave object.
 * Actually this should be done in the osd layer, but since it will not be
 * in the final product, just do it here and do not define more md/moo APIs
 * for it.
 */
static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
                              int size)
{
        struct mdt_object *object = info->mti_object;
        struct lu_fid *lf = &info->mti_tmp_fid2;
        struct md_attr *ma = &info->mti_attr;
        struct lu_dirpage *dp;
        struct lu_dirent *ent;
        int rc = 0, offset = 0;
        ENTRY;

        /* Make sure we have at least one entry. */
        if (size == 0)
                RETURN(-EINVAL);

        /*
         * Disable trans for this name insert, since it will include many
         * trans for this.
         */
        info->mti_no_need_trans = 1;
        /*
         * When write_dir_page, there is no need to update the parent's
         * ctime, and no permission check for name_insert.
         */
        ma->ma_attr.la_ctime = 0;
        ma->ma_attr.la_valid = LA_MODE;
        ma->ma_valid = MA_INODE;

        cfs_kmap(page);
        dp = page_address(page);
        offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);

        for (ent = lu_dirent_start(dp); ent != NULL;
             ent = lu_dirent_next(ent)) {
                struct lu_name *lname;
                char *name;

                if (le16_to_cpu(ent->lde_namelen) == 0)
                        continue;

                fid_le_to_cpu(lf, &ent->lde_fid);
                if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
                        ma->ma_attr.la_mode = S_IFDIR;
                else
                        ma->ma_attr.la_mode = 0;
                OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
                if (name == NULL)
                        GOTO(out, rc = -ENOMEM);

                memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
                lname = mdt_name(info->mti_env, name,
                                 le16_to_cpu(ent->lde_namelen));
                ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
                rc = mdo_name_insert(info->mti_env,
                                     md_object_next(&object->mot_obj),
                                     lname, lf, ma);
                OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
                if (rc) {
                        CERROR("Can't insert %*.*s, rc %d\n",
                               le16_to_cpu(ent->lde_namelen),
                               le16_to_cpu(ent->lde_namelen),
                               ent->lde_name, rc);
                        GOTO(out, rc);
                }

                offset += lu_dirent_size(ent);
                if (offset >= size)
                        break;
        }
        EXIT;
out:
        cfs_kunmap(page);
        return rc;
}
static int mdt_bulk_timeout(void *data)
{
        ENTRY;

        CERROR("mdt bulk transfer timeout\n");

        RETURN(1);
}
static int mdt_writepage(struct mdt_thread_info *info)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct mdt_body         *reqbody;
        struct l_wait_info      *lwi;
        struct ptlrpc_bulk_desc *desc;
        struct page             *page;
        int                      rc;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL)
                RETURN(err_serious(-EFAULT));

        desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(err_serious(-ENOMEM));

        /* allocate the page for the desc */
        page = cfs_alloc_page(CFS_ALLOC_STD);
        if (page == NULL)
                GOTO(desc_cleanup, rc = -ENOMEM);

        CDEBUG(D_INFO, "Received page offset %d size %d\n",
               (int)reqbody->size, (int)reqbody->nlink);

        ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
                              (int)reqbody->nlink);

        rc = sptlrpc_svc_prep_bulk(req, desc);
        if (rc != 0)
                GOTO(cleanup_page, rc);
        /*
         * Check if client was evicted while we were doing i/o before touching
         * the network.
         */
        OBD_ALLOC_PTR(lwi);
        if (lwi == NULL)
                GOTO(cleanup_page, rc = -ENOMEM);

        if (desc->bd_export->exp_failed)
                rc = -ENOTCONN;
        else
                rc = ptlrpc_start_bulk_transfer(desc);
        if (rc == 0) {
                *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
                                            mdt_bulk_timeout, desc);
                rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
                                  desc->bd_export->exp_failed, lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);
                if (rc == -ETIMEDOUT) {
                        DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
                        ptlrpc_abort_bulk(desc);
                } else if (desc->bd_export->exp_failed) {
                        DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
                        rc = -ENOTCONN;
                        ptlrpc_abort_bulk(desc);
                } else if (!desc->bd_success ||
                           desc->bd_nob_transferred != desc->bd_nob) {
                        DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
                                  desc->bd_success ?
                                  "truncated" : "network error on",
                                  desc->bd_nob_transferred, desc->bd_nob);
                        /* XXX should this be a different errno? */
                        rc = -ETIMEDOUT;
                }
        } else {
                DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
        }
        if (rc)
                GOTO(cleanup_lwi, rc);
        rc = mdt_write_dir_page(info, page, reqbody->nlink);

cleanup_lwi:
        OBD_FREE_PTR(lwi);
cleanup_page:
        cfs_free_page(page);
desc_cleanup:
        ptlrpc_free_bulk(desc);
        RETURN(rc);
}
#endif /* HAVE_SPLIT_SUPPORT */
static int mdt_readpage(struct mdt_thread_info *info)
{
        struct mdt_object *object = info->mti_object;
        struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct mdt_body   *reqbody;
        struct mdt_body   *repbody;
        int                rc;
        int                i;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
                RETURN(err_serious(-ENOMEM));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL || repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /*
         * prepare @rdpg before calling lower layers and transfer itself. Here
         * reqbody->size contains the offset of where to start to read and
         * reqbody->nlink contains the number of bytes to read.
         */
        rdpg->rp_hash = reqbody->size;
        if (rdpg->rp_hash != reqbody->size) {
                CERROR("Invalid hash: "LPX64" != "LPX64"\n",
                       rdpg->rp_hash, reqbody->size);
                RETURN(-EFAULT);
        }

        rdpg->rp_attrs = reqbody->mode;
        rdpg->rp_count = reqbody->nlink;
        rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
        if (rdpg->rp_pages == NULL)
                RETURN(-ENOMEM);

        for (i = 0; i < rdpg->rp_npages; ++i) {
                rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(free_rdpg, rc = -ENOMEM);
        }

        /* call lower layers to fill allocated pages with directory data */
        rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
        if (rc)
                GOTO(free_rdpg, rc);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg);

        EXIT;
free_rdpg:

        for (i = 0; i < rdpg->rp_npages; i++)
                if (rdpg->rp_pages[i] != NULL)
                        cfs_free_page(rdpg->rp_pages[i]);
        OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                RETURN(0);

        return rc;
}
static int mdt_reint_internal(struct mdt_thread_info *info,
                              struct mdt_lock_handle *lhc,
                              __u32 op)
{
        struct req_capsule *pill = info->mti_pill;
        struct mdt_device  *mdt = info->mti_mdt;
        struct md_quota    *mq = md_quota(info->mti_env);
        struct mdt_body    *repbody;
        int                 rc = 0;
        ENTRY;

        if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                                     mdt->mdt_max_mdsize);
        if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
                                     mdt->mdt_max_cookiesize);

        rc = req_capsule_server_pack(pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
                repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
                LASSERT(repbody);
                repbody->eadatasize = 0;
                repbody->aclsize = 0;
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
                GOTO(out_shrink, rc = err_serious(-EFAULT));

        rc = mdt_reint_unpack(info, op);
        if (rc != 0) {
                CERROR("Can't unpack reint, rc %d\n", rc);
                GOTO(out_shrink, rc = err_serious(rc));
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);

        /* for replay, no cookie / lmm is needed, because the client already
         * has them */
        if (info->mti_spec.no_create == 1) {
                if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);

                if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
                                             0);
        }

        rc = mdt_init_ucred_reint(info);
        if (rc)
                GOTO(out_shrink, rc);

        rc = mdt_fix_attr_ucred(info, op);
        if (rc != 0)
                GOTO(out_ucred, rc = err_serious(rc));

        if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
                rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
                GOTO(out_ucred, rc);
        }
        mq->mq_exp = info->mti_exp;
        rc = mdt_reint_rec(info, lhc);
        EXIT;
out_ucred:
        mdt_exit_ucred(info);
out_shrink:
        mdt_shrink_reply(info);
        return rc;
}
static long mdt_reint_opcode(struct mdt_thread_info *info,
                             const struct req_format **fmt)
{
        struct mdt_rec_reint *rec;
        long opc;

        opc = err_serious(-EFAULT);
        rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
        if (rec != NULL) {
                opc = rec->rr_opcode;
                DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
                if (opc < REINT_MAX && fmt[opc] != NULL)
                        req_capsule_extend(info->mti_pill, fmt[opc]);
                else {
                        CERROR("Unsupported opc: %ld\n", opc);
                        opc = err_serious(opc);
                }
        }
        return opc;
}
static int mdt_reint(struct mdt_thread_info *info)
{
        long opc;
        int  rc;

        static const struct req_format *reint_fmts[REINT_MAX] = {
                [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
                [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
                [REINT_LINK]     = &RQF_MDS_REINT_LINK,
                [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
                [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
                [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
                [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
        };

        ENTRY;

        opc = mdt_reint_opcode(info, reint_fmts);
        if (opc >= 0) {
                /*
                 * No lock possible here from client to pass it to reint code
                 * path.
                 */
                rc = mdt_reint_internal(info, NULL, opc);
        } else {
                rc = opc;
        }

        info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
        RETURN(rc);
}
/* this should sync the whole device */
static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
{
        struct dt_device *dt = mdt->mdt_bottom;
        int rc;
        ENTRY;

        rc = dt->dd_ops->dt_sync(env, dt);
        RETURN(rc);
}

/* this should sync this object */
static int mdt_object_sync(struct mdt_thread_info *info)
{
        struct md_object *next;
        int rc;
        ENTRY;

        if (!mdt_object_exists(info->mti_object)) {
                CWARN("Non-existing object "DFID"!\n",
                      PFID(mdt_object_fid(info->mti_object)));
                RETURN(-ESTALE);
        }
        next = mdt_object_child(info->mti_object);
        rc = mo_object_sync(info->mti_env, next);

        RETURN(rc);
}
static int mdt_sync(struct mdt_thread_info *info)
{
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body    *body;
        int                 rc;
        ENTRY;

        /* The fid may be zero, so we req_capsule_set manually */
        req_capsule_set(pill, &RQF_MDS_SYNC);

        body = req_capsule_client_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                RETURN(err_serious(-EINVAL));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
                RETURN(err_serious(-ENOMEM));

        if (fid_seq(&body->fid1) == 0) {
                /* sync the whole device */
                rc = req_capsule_server_pack(pill);
                if (rc == 0)
                        rc = mdt_device_sync(info->mti_env, info->mti_mdt);
                else
                        rc = err_serious(rc);
        } else {
                /* sync an object */
                rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
                if (rc == 0) {
                        rc = mdt_object_sync(info);
                        if (rc == 0) {
                                struct md_object *next;
                                const struct lu_fid *fid;
                                struct lu_attr *la = &info->mti_attr.ma_attr;

                                next = mdt_object_child(info->mti_object);
                                info->mti_attr.ma_need = MA_INODE;
                                info->mti_attr.ma_valid = 0;
                                rc = mo_attr_get(info->mti_env, next,
                                                 &info->mti_attr);
                                if (rc == 0) {
                                        body = req_capsule_server_get(pill,
                                                                &RMF_MDT_BODY);
                                        fid = mdt_object_fid(info->mti_object);
                                        mdt_pack_attr2body(info, body, la, fid);
                                }
                        }
                } else
                        rc = err_serious(rc);
        }
        RETURN(rc);
}
#ifdef HAVE_QUOTA_SUPPORT
static int mdt_quotacheck_handle(struct mdt_thread_info *info)
{
        struct obd_quotactl *oqctl;
        struct req_capsule  *pill = info->mti_pill;
        struct obd_export   *exp = info->mti_exp;
        struct md_quota     *mq = md_quota(info->mti_env);
        struct md_device    *next = info->mti_mdt->mdt_child;
        int rc;
        ENTRY;

        oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(-EPROTO);

        /* remote client has no permission for quotacheck */
        if (unlikely(exp_connect_rmtclient(exp)))
                RETURN(-EPERM);

        rc = req_capsule_server_pack(pill);
        if (rc)
                RETURN(rc);

        mq->mq_exp = exp;
        rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
                                               oqctl->qc_type);
        RETURN(rc);
}
static int mdt_quotactl_handle(struct mdt_thread_info *info)
{
        struct obd_quotactl *oqctl, *repoqc;
        struct req_capsule  *pill = info->mti_pill;
        struct obd_export   *exp = info->mti_exp;
        struct md_quota     *mq = md_quota(info->mti_env);
        struct md_device    *next = info->mti_mdt->mdt_child;
        const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
        int id, rc;
        ENTRY;

        oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(-EPROTO);

        id = oqctl->qc_id;
        if (exp_connect_rmtclient(exp)) {
                struct ptlrpc_request *req = mdt_info_req(info);
                struct mdt_export_data *med = mdt_req2med(req);
                struct lustre_idmap_table *idmap = med->med_idmap;

                if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
                             oqctl->qc_cmd != Q_GETINFO))
                        RETURN(-EPERM);

                if (oqctl->qc_type == USRQUOTA)
                        id = lustre_idmap_lookup_uid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else if (oqctl->qc_type == GRPQUOTA)
                        id = lustre_idmap_lookup_gid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else
                        RETURN(-EINVAL);

                if (id == CFS_IDMAP_NOTFOUND) {
                        CDEBUG(D_QUOTA, "no mapping for id %u\n",
                               oqctl->qc_id);
                        RETURN(-EACCES);
                }
        }

        rc = req_capsule_server_pack(pill);
        if (rc)
                RETURN(rc);

        repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
        LASSERT(repoqc != NULL);

        mq->mq_exp = exp;
        switch (oqctl->qc_cmd) {
        case Q_QUOTAON:
                rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
                break;
        case Q_QUOTAOFF:
                rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
                break;
        case Q_SETINFO:
                rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
                                      &oqctl->qc_dqinfo);
                break;
        case Q_GETINFO:
                rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
                                      &oqctl->qc_dqinfo);
                break;
        case Q_SETQUOTA:
                rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
                                       &oqctl->qc_dqblk);
                break;
        case Q_GETQUOTA:
                rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
                                       &oqctl->qc_dqblk);
                break;
        case Q_GETOINFO:
                rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
                                       &oqctl->qc_dqinfo);
                break;
        case Q_GETOQUOTA:
                rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
                                        &oqctl->qc_dqblk);
                break;
        case LUSTRE_Q_INVALIDATE:
                rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
                break;
        case LUSTRE_Q_FINVALIDATE:
                rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
                break;
        default:
                CERROR("unsupported mdt_quotactl command: %d\n",
                       oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        *repoqc = *oqctl;
        RETURN(rc);
}
#endif

/*
 * OBD PING and other handlers.
 */
static int mdt_obd_ping(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        req_capsule_set(info->mti_pill, &RQF_OBD_PING);

        rc = target_handle_ping(mdt_info_req(info));
        if (rc < 0)
                rc = err_serious(rc);
        RETURN(rc);
}

static int mdt_obd_log_cancel(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}

static int mdt_obd_qc_callback(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}
/** clone llog ctxt from child (mdd)
 * This allows remote llog (replicator) access.
 * We can either pass all llog RPCs (eg mdt_llog_create) on to child where the
 * context was originally set up, or we can handle them directly.
 * I choose the latter, but that means I need any llog
 * contexts set up by child to be accessible by the mdt. So we clone the
 * context into our context list here.
 */
static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
                               int idx)
{
        struct md_device *next = mdt->mdt_child;
        struct llog_ctxt *ctxt;
        int rc;

        if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
                return 0;

        rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
        if (rc || ctxt == NULL) {
                CERROR("Can't get mdd ctxt %d\n", rc);
                return rc;
        }

        rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
        if (rc)
                CERROR("Can't set mdt ctxt %d\n", rc);

        return rc;
}

static int mdt_llog_ctxt_unclone(const struct lu_env *env,
                                 struct mdt_device *mdt, int idx)
{
        struct llog_ctxt *ctxt;

        ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
        if (ctxt == NULL)
                return 0;
        /* Put once for the get we just did, and once for the clone */
        llog_ctxt_put(ctxt);
        llog_ctxt_put(ctxt);
        return 0;
}
static int mdt_llog_create(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
        rc = llog_origin_handle_create(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

static int mdt_llog_destroy(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
        rc = llog_origin_handle_destroy(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

static int mdt_llog_read_header(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
        rc = llog_origin_handle_read_header(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

static int mdt_llog_next_block(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
        rc = llog_origin_handle_next_block(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}

static int mdt_llog_prev_block(struct mdt_thread_info *info)
{
        int rc;

        req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
        rc = llog_origin_handle_prev_block(mdt_info_req(info));
        return (rc < 0 ? err_serious(rc) : rc);
}
/*
 * DLM handlers.
 */
static struct ldlm_callback_suite cbs = {
        .lcs_completion = ldlm_server_completion_ast,
        .lcs_blocking   = ldlm_server_blocking_ast,
        .lcs_glimpse    = NULL
};

static int mdt_enqueue(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req;
        __u64 req_bits;
        int rc;

        /*
         * info->mti_dlm_req already contains swapped and (if necessary)
         * converted dlm request.
         */
        LASSERT(info->mti_dlm_req != NULL);

        req = mdt_info_req(info);

        /*
         * Lock without inodebits makes no sense and will oops later in
         * ldlm. Let's check it now to see if we have wrong lock from client or
         * bits get corrupted somewhere in mdt_intent_policy().
         */
        req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
        /* This is disabled because we need to support liblustre flock.
         * LASSERT(req_bits != 0);
         */

        rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
                                  req, info->mti_dlm_req, &cbs);
        info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
        return rc ? err_serious(rc) : req->rq_status;
}
static int mdt_convert(struct mdt_thread_info *info)
{
        int rc;
        struct ptlrpc_request *req;

        LASSERT(info->mti_dlm_req);
        req = mdt_info_req(info);
        rc = ldlm_handle_convert0(req, info->mti_dlm_req);
        return rc ? err_serious(rc) : req->rq_status;
}

static int mdt_bl_callback(struct mdt_thread_info *info)
{
        CERROR("bl callbacks should not happen on MDS\n");
        LBUG();
        return err_serious(-EOPNOTSUPP);
}

static int mdt_cp_callback(struct mdt_thread_info *info)
{
        CERROR("cp callbacks should not happen on MDS\n");
        LBUG();
        return err_serious(-EOPNOTSUPP);
}
/*
 * sec context handlers
 */
static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
{
        int rc;

        rc = mdt_handle_idmap(info);

        if (unlikely(rc)) {
                struct ptlrpc_request *req = mdt_info_req(info);
                __u32 opc;

                opc = lustre_msg_get_opc(req->rq_reqmsg);
                if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
                        sptlrpc_svc_ctx_invalidate(req);
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);

        return rc;
}
static struct mdt_object *mdt_obj(struct lu_object *o)
{
        LASSERT(lu_device_is_mdt(o->lo_dev));
        return container_of0(o, struct mdt_object, mot_obj.mo_lu);
}

struct mdt_object *mdt_object_find(const struct lu_env *env,
                                   struct mdt_device *d,
                                   const struct lu_fid *f)
{
        struct lu_object *o;
        struct mdt_object *m;
        ENTRY;

        CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
        o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
        if (unlikely(IS_ERR(o)))
                m = (struct mdt_object *)o;
        else
                m = mdt_obj(lu_object_locate(o->lo_header,
                                             d->mdt_md_dev.md_lu_dev.ld_type));
        RETURN(m);
}
/**
 * Asynchronous commit for mdt device.
 *
 * Pass the asynchronous commit call down the MDS stack.
 *
 * \param env environment
 * \param mdt the mdt device
 */
static void mdt_device_commit_async(const struct lu_env *env,
                                    struct mdt_device *mdt)
{
        struct dt_device *dt = mdt->mdt_bottom;
        int rc;

        rc = dt->dd_ops->dt_commit_async(env, dt);
        if (unlikely(rc != 0))
                CWARN("async commit start failed with rc = %d", rc);
}
/**
 * Mark the lock as "synchronous".
 *
 * Mark the lock to defer transaction commit to the unlock time.
 *
 * \param lock the lock to mark as "synchronous"
 *
 * \see mdt_is_lock_sync
 * \see mdt_save_lock
 */
static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
{
        lock->l_ast_data = (void *)1;
}

/**
 * Check whether the lock is "synchronous" or not.
 *
 * \param lock the lock to check
 * \retval 1 the lock is "synchronous"
 * \retval 0 the lock isn't "synchronous"
 *
 * \see mdt_set_lock_sync
 * \see mdt_save_lock
 */
static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
{
        return lock->l_ast_data != NULL;
}
/**
 * Blocking AST for mdt locks.
 *
 * Starts transaction commit in case of a COS lock conflict or
 * defers such a commit to the mdt_save_lock.
 *
 * \param lock the lock which blocks a request or cancelling lock
 * \param desc unused
 * \param data unused
 * \param flag indicates whether this cancelling or blocking callback
 *
 * \see ldlm_blocking_ast_nocheck
 */
int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                     void *data, int flag)
{
        struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
        struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
        int rc;
        ENTRY;

        if (flag == LDLM_CB_CANCELING)
                RETURN(0);
        lock_res_and_lock(lock);
        if (lock->l_blocking_ast != mdt_blocking_ast) {
                unlock_res_and_lock(lock);
                RETURN(0);
        }
        if (mdt_cos_is_enabled(mdt) &&
            lock->l_req_mode & (LCK_PW | LCK_EX) &&
            lock->l_blocking_lock != NULL &&
            lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
                mdt_set_lock_sync(lock);
        }
        rc = ldlm_blocking_ast_nocheck(lock);

        /* There is no lock conflict if l_blocking_lock == NULL,
         * it indicates a blocking ast sent from ldlm_lock_decref_internal
         * when the last reference to a local lock was released */
        if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
                struct lu_env env;

                rc = lu_env_init(&env, LCT_MD_THREAD);
                if (unlikely(rc != 0)) {
                        CWARN("lu_env initialization failed with rc = %d, "
                              "cannot start asynchronous commit\n", rc);
                } else {
                        mdt_device_commit_async(&env, mdt);
                        lu_env_fini(&env);
                }
        }
        RETURN(rc);
}
int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
                    struct mdt_lock_handle *lh, __u64 ibits, int locality)
{
        struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
        ldlm_policy_data_t *policy = &info->mti_policy;
        struct ldlm_res_id *res_id = &info->mti_res_id;
        int rc;
        ENTRY;

        LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
        LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_type != MDT_NUL_LOCK);

        if (mdt_object_exists(o) < 0) {
                if (locality == MDT_CROSS_LOCK) {
                        /* cross-ref object fix */
                        ibits &= ~MDS_INODELOCK_UPDATE;
                        ibits |= MDS_INODELOCK_LOOKUP;
                } else {
                        LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
                        LASSERT(ibits & MDS_INODELOCK_LOOKUP);
                }
                /* No PDO lock on remote object */
                LASSERT(lh->mlh_type != MDT_PDO_LOCK);
        }

        if (lh->mlh_type == MDT_PDO_LOCK) {
                /* check for exists after object is locked */
                if (mdt_object_exists(o) == 0) {
                        /* Non-existent object shouldn't have PDO lock */
                        RETURN(-ESTALE);
                } else {
                        /* Non-dir object shouldn't have PDO lock */
                        LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
                }
        }

        memset(policy, 0, sizeof(*policy));
        fid_build_reg_res_name(mdt_object_fid(o), res_id);

        /*
         * Take PDO lock on whole directory and build correct @res_id for lock
         * on part of directory.
         */
        if (lh->mlh_pdo_hash != 0) {
                LASSERT(lh->mlh_type == MDT_PDO_LOCK);
                mdt_lock_pdo_mode(info, o, lh);
                if (lh->mlh_pdo_mode != LCK_NL) {
                        /*
                         * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
                         * is never going to be sent to client and we do not
                         * want it slowed down due to possible cancels.
                         */
                        policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
                        rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
                                          policy, res_id, LDLM_FL_ATOMIC_CB,
                                          &info->mti_exp->exp_handle.h_cookie);
                        if (unlikely(rc))
                                RETURN(rc);
                }

                /*
                 * Finish res_id initializing by name hash marking part of
                 * directory which is taking modification.
                 */
                res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
        }

        policy->l_inodebits.bits = ibits;

        /*
         * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
         * going to be sent to client. If it is - mdt_intent_policy() path will
         * fix it up and turn FL_LOCAL flag off.
         */
        rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
                          res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
                          &info->mti_exp->exp_handle.h_cookie);
        if (rc)
                mdt_object_unlock(info, o, lh, 1);

        RETURN(rc);
}
/**
 * Save a lock within request object.
 *
 * Keep the lock referenced until either the client ACK or the transaction
 * commit happens, or release the lock immediately depending on input
 * parameters. If COS is ON, a write lock is converted to a COS lock
 * before saving.
 *
 * \param info thread info object
 * \param h lock handle
 * \param mode lock mode
 * \param decref force immediate lock releasing
 */
void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
                   ldlm_mode_t mode, int decref)
{
        ENTRY;

        if (lustre_handle_is_used(h)) {
                if (decref || !info->mti_has_trans ||
                    !(mode & (LCK_PW | LCK_EX))) {
                        mdt_fid_unlock(h, mode);
                } else {
                        struct mdt_device *mdt = info->mti_mdt;
                        struct ldlm_lock *lock = ldlm_handle2lock(h);
                        struct ptlrpc_request *req = mdt_info_req(info);
                        int no_ack = 0;

                        LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
                                 h->cookie);
                        CDEBUG(D_HA, "request = %p reply state = %p"
                               " transno = "LPD64"\n",
                               req, req->rq_reply_state, req->rq_transno);
                        if (mdt_cos_is_enabled(mdt)) {
                                no_ack = 1;
                                ldlm_lock_downgrade(lock, LCK_COS);
                                mode = LCK_COS;
                        }
                        ptlrpc_save_lock(req, h, mode, no_ack);
                        if (mdt_is_lock_sync(lock)) {
                                CDEBUG(D_HA, "found sync-lock,"
                                       " async commit started\n");
                                mdt_device_commit_async(info->mti_env,
                                                        mdt);
                        }
                        LDLM_LOCK_PUT(lock);
                }
                h->cookie = 0ull;
        }

        EXIT;
}
/**
 * Unlock mdt object.
 *
 * Immediately release the regular lock and the PDO lock, or save the
 * lock in the request and keep them referenced until client ACK or
 * transaction commit.
 *
 * \param info thread info object
 * \param o mdt object
 * \param lh mdt lock handle referencing regular and PDO locks
 * \param decref force immediate lock releasing
 */
void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
                       struct mdt_lock_handle *lh, int decref)
{
        ENTRY;

        mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
        mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);

        EXIT;
}
struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
                                        const struct lu_fid *f,
                                        struct mdt_lock_handle *lh,
                                        __u64 ibits)
{
        struct mdt_object *o;

        o = mdt_object_find(info->mti_env, info->mti_mdt, f);
        if (!IS_ERR(o)) {
                int rc;

                rc = mdt_object_lock(info, o, lh, ibits,
                                     MDT_LOCAL_LOCK);
                if (rc != 0) {
                        mdt_object_put(info->mti_env, o);
                        o = ERR_PTR(rc);
                }
        }
        return o;
}

void mdt_object_unlock_put(struct mdt_thread_info *info,
                           struct mdt_object *o,
                           struct mdt_lock_handle *lh,
                           int decref)
{
        mdt_object_unlock(info, o, lh, decref);
        mdt_object_put(info->mti_env, o);
}

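/*
 * Usage sketch (illustrative, not from the original file): find+lock and
 * unlock+put are meant to bracket a whole operation; @fid, the handle slot
 * MDT_LH_CHILD and the ibits below are assumptions made for the example:
 *
 *      struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_CHILD];
 *      struct mdt_object *o;
 *
 *      mdt_lock_reg_init(lh, LCK_EX);
 *      o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_FULL);
 *      if (IS_ERR(o))
 *              RETURN(PTR_ERR(o));
 *      ... operate on the locked object ...
 *      mdt_object_unlock_put(info, o, lh, rc);
 */
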
static struct mdt_handler *mdt_handler_find(__u32 opc,
                                            struct mdt_opc_slice *supported)
{
        struct mdt_opc_slice *s;
        struct mdt_handler *h;

        h = NULL;
        for (s = supported; s->mos_hs != NULL; s++) {
                if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
                        h = s->mos_hs + (opc - s->mos_opc_start);
                        if (likely(h->mh_opc != 0))
                                LASSERTF(h->mh_opc == opc,
                                         "opcode mismatch %d != %d\n",
                                         h->mh_opc, opc);
                        else
                                h = NULL; /* unsupported opc */
                        break;
                }
        }
        return h;
}

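/*
 * Example (added commentary, not from the original file): handlers inside a
 * slice are indexed densely by opcode. With a slice covering
 * [MDS_GETATTR, MDS_LAST_OPC), an incoming MDS_GETATTR_NAME request
 * resolves to
 *
 *      h = s->mos_hs + (MDS_GETATTR_NAME - s->mos_opc_start);
 *
 * and a zero mh_opc in the table marks an opcode with no handler.
 */
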
static int mdt_lock_resname_compat(struct mdt_device *m,
                                   struct ldlm_request *req)
{
        /* XXX something... later. */
        return 0;
}

static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
{
        /* XXX something... later. */
        return 0;
}

/*
 * Generic code handling requests that have struct mdt_body passed in:
 *
 *  - extract mdt_body from request and save it in @info, if present;
 *
 *  - create lu_object, corresponding to the fid in mdt_body, and save it in
 *    @info;
 *
 *  - if HABEO_CORPUS flag is set for this request type, check whether the
 *    object actually exists on storage (lu_object_exists()).
 */
static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
{
        const struct mdt_body *body;
        struct mdt_object *obj;
        const struct lu_env *env;
        struct req_capsule *pill;
        int rc;
        ENTRY;

        env = info->mti_env;
        pill = info->mti_pill;

        body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                RETURN(-EFAULT);

        if (!(body->valid & OBD_MD_FLID))
                RETURN(0);

        if (!fid_is_sane(&body->fid1)) {
                CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
                RETURN(-EINVAL);
        }

        /*
         * Do not get size or any capa fields before we check that the request
         * actually contains capa. There are some requests which do not, for
         * instance MDS_IS_SUBDIR.
         */
        if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
            req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
                mdt_set_capainfo(info, 0, &body->fid1,
                                 req_capsule_client_get(pill, &RMF_CAPA1));

        obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
        if (!IS_ERR(obj)) {
                if ((flags & HABEO_CORPUS) &&
                    !mdt_object_exists(obj)) {
                        mdt_object_put(env, obj);
                        /* for capability renew, ENOENT will be handled in
                         * mdt_renew_capa() */
                        if (body->valid & OBD_MD_FLOSSCAPA)
                                rc = 0;
                        else
                                rc = -ENOENT;
                } else {
                        info->mti_object = obj;
                        rc = 0;
                }
        } else
                rc = PTR_ERR(obj);

        RETURN(rc);
}

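/*
 * Note (added commentary, not from the original file): after a successful
 * return, handlers flagged HABEO_CORPUS may rely on info->mti_body and
 * info->mti_object being set and on the object existing locally. On the
 * -ENOENT path the object reference has already been dropped above, so
 * callers need no cleanup of their own.
 */
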
static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
{
        struct req_capsule *pill = info->mti_pill;
        int rc;
        ENTRY;

        if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
                rc = mdt_body_unpack(info, flags);
        else
                rc = 0;

        if (rc == 0 && (flags & HABEO_REFERO)) {
                struct mdt_device *mdt = info->mti_mdt;

                /* Pack reply. */
                if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                                             mdt->mdt_max_mdsize);
                if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
                                             mdt->mdt_max_cookiesize);

                rc = req_capsule_server_pack(pill);
        }
        RETURN(rc);
}

static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
{
        struct md_device *next = m->mdt_child;

        return next->md_ops->mdo_init_capa_ctxt(env, next,
                                                m->mdt_opts.mo_mds_capa,
                                                m->mdt_capa_timeout,
                                                m->mdt_capa_alg,
                                                m->mdt_capa_keys);
}

/*
 * Invoke handler for this request opc. Also do necessary preprocessing
 * (according to handler ->mh_flags), and post-processing (setting of
 * ->last_{xid,committed}).
 */
static int mdt_req_handle(struct mdt_thread_info *info,
                          struct mdt_handler *h, struct ptlrpc_request *req)
{
        int rc, serious = 0;
        __u32 flags;
        ENTRY;

        LASSERT(h->mh_act != NULL);
        LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
        LASSERT(current->journal_info == NULL);

        /*
         * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
         * to put the same checks into handlers like mdt_close(), mdt_reint(),
         * etc., without talking to mdt authors first. Checking the same thing
         * there again is useless and returning 0 error without packing a
         * reply is buggy! Handlers either pack a reply or return an error.
         *
         * We return 0 here and do not send any reply in order to emulate
         * network failure. Do not send any reply in case any of the NET
         * related fail_ids has occurred.
         */
        if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
                RETURN(0);

        rc = 0;
        flags = h->mh_flags;
        LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));

        if (h->mh_fmt != NULL) {
                req_capsule_set(info->mti_pill, h->mh_fmt);
                rc = mdt_unpack_req_pack_rep(info, flags);
        }

        if (rc == 0 && flags & MUTABOR &&
            req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
                /* should it be rq_status? */
                rc = -EROFS;

        if (rc == 0 && flags & HABEO_CLAVIS) {
                struct ldlm_request *dlm_req;

                LASSERT(h->mh_fmt != NULL);

                dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
                if (dlm_req != NULL) {
                        if (info->mti_mdt->mdt_opts.mo_compat_resname)
                                rc = mdt_lock_resname_compat(info->mti_mdt,
                                                             dlm_req);
                        info->mti_dlm_req = dlm_req;
                } else {
                        rc = -EFAULT;
                }
        }

        /* capability setting changed via /proc, needs to reinitialize ctxt */
        if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
                mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
                info->mti_mdt->mdt_capa_conf = 0;
        }

        if (likely(rc == 0)) {
                /*
                 * Process request, there can be two types of rc:
                 * 1) errors with msg unpack/pack, other failures outside the
                 * operation itself. These are counted as serious errors;
                 * 2) errors during fs operation, which should be placed in
                 * rq_status only.
                 */
                rc = h->mh_act(info);
                if (rc == 0 &&
                    !req->rq_no_reply && req->rq_reply_state == NULL) {
                        DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
                                  "pack reply and returned 0 error\n",
                                  h->mh_name);
                        LBUG();
                }
                serious = is_serious(rc);
                rc = clear_serious(rc);
        } else {
                serious = 1;
        }

        req->rq_status = rc;

        /*
         * ELDLM_* codes which > 0 should be in rq_status only, as well as
         * all non-serious errors.
         */
        if (rc > 0 || !serious)
                rc = 0;

        LASSERT(current->journal_info == NULL);

        if (rc == 0 && (flags & HABEO_CLAVIS) &&
            info->mti_mdt->mdt_opts.mo_compat_resname) {
                struct ldlm_reply *dlmrep;

                dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
                if (dlmrep != NULL)
                        rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
        }

        /* If we're DISCONNECTing, the mdt_export_data is already freed */
        if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
                target_committed_to_req(req);

        if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
                     lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
                DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
                LBUG();
        }

        target_send_reply(req, rc, info->mti_fail_id);
        RETURN(0);
}

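/*
 * Note (added commentary, not from the original file): the rc/serious split
 * above implements the convention that filesystem-level failures travel to
 * the client in req->rq_status with rc forced to 0 (a normal reply is sent),
 * while only "serious" failures (unpack/pack problems and the like) keep a
 * negative rc and are reported as RPC-level errors by target_send_reply().
 */
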
void mdt_lock_handle_init(struct mdt_lock_handle *lh)
{
        lh->mlh_type = MDT_NUL_LOCK;
        lh->mlh_reg_lh.cookie = 0ull;
        lh->mlh_reg_mode = LCK_MINMODE;
        lh->mlh_pdo_lh.cookie = 0ull;
        lh->mlh_pdo_mode = LCK_MINMODE;
}

void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
{
        LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
        LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
}

/*
 * Initialize fields of struct mdt_thread_info. Other fields are left in an
 * uninitialized state, because it's too expensive to zero out the whole
 * mdt_thread_info (> 1K) on each request arrival.
 */
static void mdt_thread_info_init(struct ptlrpc_request *req,
                                 struct mdt_thread_info *info)
{
        int i;
        struct md_capainfo *ci;

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);
        info->mti_pill = &req->rq_pill;

        /* lock handles */
        for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
                mdt_lock_handle_init(&info->mti_lh[i]);

        /* mdt device: it can be NULL while CONNECT */
        if (req->rq_export) {
                info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
                info->mti_exp = req->rq_export;
        } else {
                info->mti_mdt = NULL;
        }
        info->mti_env = req->rq_svc_thread->t_env;
        ci = md_capainfo(info->mti_env);
        memset(ci, 0, sizeof(*ci));
        if (req->rq_export) {
                if (exp_connect_rmtclient(req->rq_export))
                        ci->mc_auth = LC_ID_CONVERT;
                else if (req->rq_export->exp_connect_flags &
                         OBD_CONNECT_MDS_CAPA)
                        ci->mc_auth = LC_ID_PLAIN;
                else
                        ci->mc_auth = LC_ID_NONE;
        }

        info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
        info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
        info->mti_mos[0] = NULL;
        info->mti_mos[1] = NULL;
        info->mti_mos[2] = NULL;
        info->mti_mos[3] = NULL;

        memset(&info->mti_attr, 0, sizeof(info->mti_attr));
        info->mti_body = NULL;
        info->mti_object = NULL;
        info->mti_dlm_req = NULL;
        info->mti_has_trans = 0;
        info->mti_no_need_trans = 0;
        info->mti_cross_ref = 0;
        info->mti_opdata = 0;

        /* To not check for split by default. */
        info->mti_spec.sp_ck_split = 0;
        info->mti_spec.no_create = 0;
}

static void mdt_thread_info_fini(struct mdt_thread_info *info)
{
        int i;

        req_capsule_fini(info->mti_pill);
        if (info->mti_object != NULL) {
                /*
                 * Freeing an object may lead to an OSD level transaction,
                 * do not let it mess with MDT. bz19385.
                 */
                info->mti_no_need_trans = 1;
                mdt_object_put(info->mti_env, info->mti_object);
                info->mti_object = NULL;
        }
        for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
                mdt_lock_handle_fini(&info->mti_lh[i]);
        info->mti_env = NULL;
}

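/*
 * Note (added commentary, not from the original file): init/fini bracket
 * every RPC: mdt_handle_common() calls mdt_thread_info_init() before
 * dispatching and mdt_thread_info_fini() after. Since the structure is
 * deliberately not zeroed between requests, any field not reset in this
 * pair must be treated as garbage at the start of the next request served
 * by the same thread.
 */
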
static int mdt_filter_recovery_request(struct ptlrpc_request *req,
                                       struct obd_device *obd, int *process)
{
        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case MDS_CONNECT: /* This will never get here, but for completeness. */
        case OST_CONNECT: /* This will never get here, but for completeness. */
        case MDS_DISCONNECT:
        case OST_DISCONNECT:
                *process = 1;
                RETURN(0);

        case MDS_CLOSE:
        case MDS_DONE_WRITING:
        case MDS_SYNC: /* used in unmounting */
        case OBD_PING:
        case MDS_REINT:
        case SEQ_QUERY:
        case FLD_QUERY:
        case LDLM_ENQUEUE:
                *process = target_queue_recovery_request(req, obd);
                RETURN(0);

        default:
                DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
                *process = -EAGAIN;
                RETURN(0);
        }
}

/*
 * Handle recovery. Return:
 *        +1: continue request processing;
 *       -ve: abort immediately with the given error code;
 *         0: send reply with error code in req->rq_status;
 */
static int mdt_recovery(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_device *obd;
        int rc;
        ENTRY;

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case MDS_CONNECT:
        case SEC_CTX_INIT:
        case SEC_CTX_INIT_CONT:
        case SEC_CTX_FINI:
                RETURN(+1);
        }

        rc = mdt_handle_idmap(info);
        if (rc < 0) {
                req->rq_status = rc;
                rc = ptlrpc_error(req);
                RETURN(rc);
        }

        if (unlikely(!class_connected_export(req->rq_export))) {
                CERROR("operation %d on unconnected MDS from %s\n",
                       lustre_msg_get_opc(req->rq_reqmsg),
                       libcfs_id2str(req->rq_peer));
                /* FIXME: For CMD cleanup, when mds_B stops, the req from
                 * mds_A will get -ENOTCONN (especially for ping req), which
                 * will cause mds_A to deactivate with a timeout; then, when
                 * mds_A cleans up, the cleanup process will be suspended
                 * since the deactivate timeout is not zero.
                 */
                req->rq_status = -ENOTCONN;
                target_send_reply(req, -ENOTCONN, info->mti_fail_id);
                RETURN(0);
        }

        /* sanity check: if the xid matches, the request must be marked as a
         * resent or replayed */
        if (req_xid_is_last(req)) {
                if (!(lustre_msg_get_flags(req->rq_reqmsg) &
                      (MSG_RESENT | MSG_REPLAY))) {
                        DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches "
                                  "last_xid, expected REPLAY or RESENT flag "
                                  "(%x)", req->rq_xid,
                                  lustre_msg_get_flags(req->rq_reqmsg));
                        req->rq_status = -ENOTCONN;
                        RETURN(-ENOTCONN);
                }
        }

        /* else: note the opposite is not always true; a RESENT req after a
         * failover will usually not match the last_xid, since it was likely
         * never committed. A REPLAYed request will almost never match the
         * last xid, however it could for a committed, but still retained,
         * open request. */

        obd = req->rq_export->exp_obd;

        /* Check for aborted recovery... */
        if (unlikely(obd->obd_recovering)) {
                int should_process;

                DEBUG_REQ(D_INFO, req, "Got new replay");
                rc = mdt_filter_recovery_request(req, obd, &should_process);
                if (rc != 0 || !should_process)
                        RETURN(rc);
                else if (should_process < 0) {
                        req->rq_status = should_process;
                        rc = ptlrpc_error(req);
                        RETURN(rc);
                }
        }
        RETURN(+1);
}

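/*
 * Note (added commentary, not from the original file): the three-way return
 * convention documented above is consumed by mdt_handle0(): +1 lets normal
 * dispatch proceed, 0 means a reply (usually an error in rq_status) has
 * already been sent, and a negative value aborts processing with that error.
 */
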
static int mdt_msg_check_version(struct lustre_msg *msg)
{
        int rc;

        switch (lustre_msg_get_opc(msg)) {
        case MDS_CONNECT:
        case MDS_DISCONNECT:
        case OBD_PING:
        case SEC_CTX_INIT:
        case SEC_CTX_INIT_CONT:
        case SEC_CTX_FINI:
                rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               lustre_msg_get_opc(msg),
                               lustre_msg_get_version(msg),
                               LUSTRE_OBD_VERSION);
                break;
        case MDS_GETSTATUS:
        case MDS_GETATTR:
        case MDS_GETATTR_NAME:
        case MDS_STATFS:
        case MDS_READPAGE:
        case MDS_WRITEPAGE:
        case MDS_IS_SUBDIR:
        case MDS_REINT:
        case MDS_CLOSE:
        case MDS_DONE_WRITING:
        case MDS_PIN:
        case MDS_SYNC:
        case MDS_GETXATTR:
        case MDS_SETXATTR:
        case MDS_SET_INFO:
        case MDS_QUOTACHECK:
        case MDS_QUOTACTL:
        case QUOTA_DQACQ:
        case QUOTA_DQREL:
        case SEQ_QUERY:
        case FLD_QUERY:
                rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               lustre_msg_get_opc(msg),
                               lustre_msg_get_version(msg),
                               LUSTRE_MDS_VERSION);
                break;
        case LDLM_ENQUEUE:
        case LDLM_CONVERT:
        case LDLM_BL_CALLBACK:
        case LDLM_CP_CALLBACK:
                rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               lustre_msg_get_opc(msg),
                               lustre_msg_get_version(msg),
                               LUSTRE_DLM_VERSION);
                break;
        case OBD_LOG_CANCEL:
        case LLOG_ORIGIN_HANDLE_CREATE:
        case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
        case LLOG_ORIGIN_HANDLE_READ_HEADER:
        case LLOG_ORIGIN_HANDLE_CLOSE:
        case LLOG_ORIGIN_HANDLE_DESTROY:
        case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
        case LLOG_CATINFO:
                rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
                if (rc)
                        CERROR("bad opc %u version %08x, expecting %08x\n",
                               lustre_msg_get_opc(msg),
                               lustre_msg_get_version(msg),
                               LUSTRE_LOG_VERSION);
                break;
        default:
                CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
                rc = -ENOTSUPP;
        }
        return rc;
}

static int mdt_handle0(struct ptlrpc_request *req,
                       struct mdt_thread_info *info,
                       struct mdt_opc_slice *supported)
{
        struct mdt_handler *h;
        struct lustre_msg *msg;
        int rc;
        ENTRY;

        if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
                RETURN(0);

        LASSERT(current->journal_info == NULL);

        msg = req->rq_reqmsg;
        rc = mdt_msg_check_version(msg);
        if (likely(rc == 0)) {
                rc = mdt_recovery(info);
                if (likely(rc == +1)) {
                        h = mdt_handler_find(lustre_msg_get_opc(msg),
                                             supported);
                        if (likely(h != NULL)) {
                                rc = mdt_req_handle(info, h, req);
                        } else {
                                CERROR("Unsupported opc: 0x%x\n",
                                       lustre_msg_get_opc(msg));
                                req->rq_status = -ENOTSUPP;
                                rc = ptlrpc_error(req);
                                RETURN(rc);
                        }
                }
        } else {
                CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
        }
        RETURN(rc);
}

/*
 * MDT handler function called by ptlrpc service thread when a request
 * arrives.
 *
 * XXX common "target" functionality should be factored into a separate
 * module shared by mdt, ost and stand-alone services like fld.
 */
static int mdt_handle_common(struct ptlrpc_request *req,
                             struct mdt_opc_slice *supported)
{
        struct lu_env *env;
        struct mdt_thread_info *info;
        int rc;
        ENTRY;

        env = req->rq_svc_thread->t_env;
        LASSERT(env != NULL);
        LASSERT(env->le_ses != NULL);
        LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
        info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
        LASSERT(info != NULL);

        mdt_thread_info_init(req, info);

        rc = mdt_handle0(req, info, supported);

        mdt_thread_info_fini(info);
        RETURN(rc);
}

/*
 * This is called from recovery code as handler of _all_ RPC types, FLD and
 * SEQ as well.
 */
int mdt_recovery_handle(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case FLD_QUERY:
                rc = mdt_handle_common(req, mdt_fld_handlers);
                break;
        case SEQ_QUERY:
                rc = mdt_handle_common(req, mdt_seq_handlers);
                break;
        default:
                rc = mdt_handle_common(req, mdt_regular_handlers);
                break;
        }

        RETURN(rc);
}

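/*
 * Note (added commentary, not from the original file): during recovery all
 * replayed RPC types funnel through this single entry point, so FLD and SEQ
 * requests must be re-routed here to their own handler slices; everything
 * else falls through to the regular mdt handlers.
 */
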
static int mdt_regular_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_regular_handlers);
}

static int mdt_readpage_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_readpage_handlers);
}

static int mdt_xmds_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_xmds_handlers);
}

static int mdt_mdsc_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_seq_handlers);
}

static int mdt_mdss_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_seq_handlers);
}

static int mdt_dtss_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_seq_handlers);
}

static int mdt_fld_handle(struct ptlrpc_request *req)
{
        return mdt_handle_common(req, mdt_fld_handlers);
}

static int mdt_intent_getattr(enum mdt_it_code opcode,
                              struct mdt_thread_info *info,
                              struct ldlm_lock **,
                              int);

static int mdt_intent_reint(enum mdt_it_code opcode,
                            struct mdt_thread_info *info,
                            struct ldlm_lock **,
                            int);

static struct mdt_it_flavor {
        const struct req_format *it_fmt;
        __u32                    it_flags;
        int                    (*it_act)(enum mdt_it_code,
                                         struct mdt_thread_info *,
                                         struct ldlm_lock **,
                                         int);
        long                     it_reint;
} mdt_it_flavor[] = {
        [MDT_IT_OPEN]     = {
                .it_fmt   = &RQF_LDLM_INTENT,
                /*.it_flags = HABEO_REFERO,*/
                .it_flags = 0,
                .it_act   = mdt_intent_reint,
                .it_reint = REINT_OPEN
        },
        [MDT_IT_OCREAT]   = {
                .it_fmt   = &RQF_LDLM_INTENT,
                .it_flags = MUTABOR,
                .it_act   = mdt_intent_reint,
                .it_reint = REINT_OPEN
        },
        [MDT_IT_CREATE]   = {
                .it_fmt   = &RQF_LDLM_INTENT,
                .it_flags = MUTABOR,
                .it_act   = mdt_intent_reint,
                .it_reint = REINT_CREATE
        },
        [MDT_IT_GETATTR]  = {
                .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
                .it_flags = HABEO_REFERO,
                .it_act   = mdt_intent_getattr
        },
        [MDT_IT_READDIR]  = {
                .it_fmt   = NULL,
                .it_flags = 0,
                .it_act   = NULL
        },
        [MDT_IT_LOOKUP]   = {
                .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
                .it_flags = HABEO_REFERO,
                .it_act   = mdt_intent_getattr
        },
        [MDT_IT_UNLINK]   = {
                .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
                .it_flags = MUTABOR,
                .it_act   = NULL,
                .it_reint = REINT_UNLINK
        },
        [MDT_IT_TRUNC]    = {
                .it_fmt   = NULL,
                .it_flags = MUTABOR,
                .it_act   = NULL
        },
        [MDT_IT_GETXATTR] = {
                .it_fmt   = NULL,
                .it_flags = 0,
                .it_act   = NULL
        }
};

int mdt_intent_lock_replace(struct mdt_thread_info *info,
                            struct ldlm_lock **lockp,
                            struct ldlm_lock *new_lock,
                            struct mdt_lock_handle *lh,
                            int flags)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct ldlm_lock *lock = *lockp;
        ENTRY;

        /*
         * Get new lock only for cases when possible resent did not find any
         * remote lock.
         */
        if (new_lock == NULL)
                new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);

        if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
                lh->mlh_reg_lh.cookie = 0;
                RETURN(0);
        }

        LASSERTF(new_lock != NULL,
                 "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);

        /*
         * If we've already given this lock to a client once, then we should
         * have no readers or writers. Otherwise, we should have one reader
         * _or_ writer ref (which will be zeroed below) before returning the
         * lock to a client.
         */
        if (new_lock->l_export == req->rq_export) {
                LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
        } else {
                LASSERT(new_lock->l_export == NULL);
                LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
        }

        *lockp = new_lock;

        if (new_lock->l_export == req->rq_export) {
                /*
                 * Already gave this to the client, which means that we
                 * reconstructed a reply.
                 */
                LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
                        MSG_RESENT);
                lh->mlh_reg_lh.cookie = 0;
                RETURN(ELDLM_LOCK_REPLACED);
        }

        /*
         * Fixup the lock to be given to the client.
         */
        lock_res_and_lock(new_lock);
        /* Zero new_lock->l_readers and new_lock->l_writers without triggering
         * a possible blocking AST. */
        while (new_lock->l_readers > 0) {
                lu_ref_del(&new_lock->l_reference, "reader", new_lock);
                lu_ref_del(&new_lock->l_reference, "user", new_lock);
                new_lock->l_readers--;
        }
        while (new_lock->l_writers > 0) {
                lu_ref_del(&new_lock->l_reference, "writer", new_lock);
                lu_ref_del(&new_lock->l_reference, "user", new_lock);
                new_lock->l_writers--;
        }

        new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
        new_lock->l_blocking_ast = lock->l_blocking_ast;
        new_lock->l_completion_ast = lock->l_completion_ast;
        new_lock->l_remote_handle = lock->l_remote_handle;
        new_lock->l_flags &= ~LDLM_FL_LOCAL;

        unlock_res_and_lock(new_lock);

        cfs_hash_add(new_lock->l_export->exp_lock_hash,
                     &new_lock->l_remote_handle,
                     &new_lock->l_exp_hash);

        LDLM_LOCK_RELEASE(new_lock);
        lh->mlh_reg_lh.cookie = 0;

        RETURN(ELDLM_LOCK_REPLACED);
}

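/*
 * Note (added commentary, not from the original file): the reader/writer
 * references stripped above are the server-side refs taken when the lock was
 * granted locally; they must be dropped without firing a blocking AST
 * because the lock is being handed to the client as if it had been granted
 * remotely. Returning ELDLM_LOCK_REPLACED tells the intent policy to give
 * the client this lock instead of the one originally passed in via @lockp.
 */
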
static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
                                    struct ldlm_lock *new_lock,
                                    struct ldlm_lock **old_lock,
                                    struct mdt_lock_handle *lh)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_export *exp = req->rq_export;
        struct lustre_handle remote_hdl;
        struct ldlm_request *dlmreq;
        struct ldlm_lock *lock;

        if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
                return;

        dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
        remote_hdl = dlmreq->lock_handle[0];

        lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
        if (lock) {
                if (lock != new_lock) {
                        lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
                        lh->mlh_reg_mode = lock->l_granted_mode;

                        LDLM_DEBUG(lock, "Restoring lock cookie");
                        DEBUG_REQ(D_DLMTRACE, req,
                                  "restoring lock cookie "LPX64,
                                  lh->mlh_reg_lh.cookie);
                        if (old_lock)
                                *old_lock = LDLM_LOCK_GET(lock);
                        cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
                        return;
                }
                cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
        }

        /*
         * If the xid matches, then we know this is a resent request, and
         * allow it. (It's probably an OPEN, for which we don't send a lock.)
         */
        if (req_xid_is_last(req))