X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fmdt%2Fmdt_handler.c;h=91943141d566fa541436682a604cdfa1138244a9;hp=b6f96492409d4d0844ae663ae841ea3c881aa950;hb=355fc1274b484a65e96be1e0289f5aa453c6474d;hpb=f89a61cada93accbd02beee89f0610756ca595e9 diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c index b6f9649..aec469b 100644 --- a/lustre/mdt/mdt_handler.c +++ b/lustre/mdt/mdt_handler.c @@ -1,35 +1,49 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * lustre/mdt/mdt_handler.c - * Lustre Metadata Target (mdt) request handler + * GPL HEADER START * - * Copyright (c) 2006 Cluster File Systems, Inc. - * Author: Peter Braam - * Author: Andreas Dilger - * Author: Phil Schwan - * Author: Mike Shaver - * Author: Nikita Danilov - * Author: Huang Hua - * Author: Yury Umanets + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This file is part of the Lustre file system, http://www.lustre.org - * Lustre is a trademark of Cluster File Systems, Inc. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * You may have signed or agreed to another license before downloading - * this software. If so, you are bound by the terms and conditions - * of that agreement, and the following does not apply to you. See the - * LICENSE file included with this distribution for more information. + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * If you did not agree to a different license, then this copy of Lustre - * is open source software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * - * In either case, Lustre is distributed in the hope that it will be - * useful, but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * license text for more details. + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
+ * + * lustre/mdt/mdt_handler.c + * + * Lustre Metadata Target (mdt) request handler + * + * Author: Peter Braam + * Author: Andreas Dilger + * Author: Phil Schwan + * Author: Mike Shaver + * Author: Nikita Danilov + * Author: Huang Hua + * Author: Yury Umanets */ #ifndef EXPORT_SYMTAB @@ -40,7 +54,6 @@ #include /* * struct OBD_{ALLOC,FREE}*() - * MDT_FAIL_CHECK */ #include /* struct ptlrpc_request */ @@ -54,8 +67,12 @@ #include #include #include "mdt_internal.h" -#include +#ifdef HAVE_QUOTA_SUPPORT +# include +#endif +#include #include +#include mdl_mode_t mdt_mdl_lock_modes[] = { [LCK_MINMODE] = MDL_MINMODE, @@ -82,7 +99,9 @@ ldlm_mode_t mdt_dlm_lock_modes[] = { /* * Initialized in mdt_mod_init(). */ -unsigned long mdt_num_threads; +static unsigned long mdt_num_threads; +static unsigned long mdt_min_threads; +static unsigned long mdt_max_threads; /* ptlrpc request handler for MDT. All handlers are * grouped into several slices - struct mdt_opc_slice, @@ -148,8 +167,10 @@ static struct mdt_opc_slice mdt_fld_handlers[]; static struct mdt_device *mdt_dev(struct lu_device *d); static int mdt_regular_handle(struct ptlrpc_request *req); static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags); +static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt, + struct getinfo_fid2path *fp); -static struct lu_object_operations mdt_obj_ops; +static const struct lu_object_operations mdt_obj_ops; int mdt_get_disposition(struct ldlm_reply *rep, int flag) { @@ -286,17 +307,18 @@ static int mdt_getstatus(struct mdt_thread_info *info) if (rc) RETURN(err_serious(rc)); - if (MDT_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) RETURN(err_serious(-ENOMEM)); - repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1); if (rc != 0) RETURN(rc); repbody->valid |= OBD_MD_FLID; - if (mdt->mdt_opts.mo_mds_capa) { + if (mdt->mdt_opts.mo_mds_capa && + info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) { struct mdt_object *root; struct lustre_capa *capa; @@ -304,10 +326,9 @@ static int mdt_getstatus(struct mdt_thread_info *info) if (IS_ERR(root)) RETURN(PTR_ERR(root)); - capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA1); + capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1); LASSERT(capa); capa->lc_opc = CAPA_OPC_MDS_DEFAULT; - rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa, 0); mdt_object_put(info->mti_env, root); @@ -320,24 +341,28 @@ static int mdt_getstatus(struct mdt_thread_info *info) static int mdt_statfs(struct mdt_thread_info *info) { - struct md_device *next = info->mti_mdt->mdt_child; - struct obd_statfs *osfs; - int rc; + struct md_device *next = info->mti_mdt->mdt_child; + struct ptlrpc_service *svc; + struct obd_statfs *osfs; + int rc; ENTRY; + svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service; + /* This will trigger a watchdog timeout */ OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP, - (MDT_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1); + (MDT_SERVICE_WATCHDOG_FACTOR * + at_get(&svc->srv_at_estimate)) + 1); rc = mdt_check_ucred(info); if (rc) RETURN(err_serious(rc)); - if (MDT_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) { + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) { rc = err_serious(-ENOMEM); } else { - osfs = req_capsule_server_get(&info->mti_pill,&RMF_OBD_STATFS); + osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS); rc = 
next->md_ops->mdo_statfs(info->mti_env, next, &info->mti_u.ksfs); statfs_pack(osfs, &info->mti_u.ksfs); @@ -345,33 +370,39 @@ static int mdt_statfs(struct mdt_thread_info *info) RETURN(rc); } -void mdt_pack_size2body(struct mdt_thread_info *info, struct mdt_object *o) +/** + * Pack SOM attributes into the reply. + * Call under a DLM UPDATE lock. + */ +static void mdt_pack_size2body(struct mdt_thread_info *info, + struct mdt_object *mo) { struct mdt_body *b; - struct lu_attr *attr = &info->mti_attr.ma_attr; - - b = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + struct md_attr *ma = &info->mti_attr; - /* Check if Size-on-MDS is enabled. */ - if ((mdt_conn_flags(info) & OBD_CONNECT_SOM) && - S_ISREG(attr->la_mode) && mdt_sizeonmds_enabled(o)) { - b->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS); - b->size = attr->la_size; - b->blocks = attr->la_blocks; - } + LASSERT(ma->ma_attr.la_valid & LA_MODE); + b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); + + /* Check if Size-on-MDS is supported, if this is a regular file, + * if SOM is enabled on the object and if SOM cache exists and valid. + * Otherwise do not pack Size-on-MDS attributes to the reply. */ + if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) || + !S_ISREG(ma->ma_attr.la_mode) || + !mdt_object_is_som_enabled(mo) || + !(ma->ma_valid & MA_SOM)) + return; + + b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + b->size = ma->ma_som->msd_size; + b->blocks = ma->ma_som->msd_blocks; } void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, const struct lu_attr *attr, const struct lu_fid *fid) { - /*XXX should pack the reply body according to lu_valid*/ - b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID | - OBD_MD_FLGID | OBD_MD_FLTYPE | - OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS | - OBD_MD_FLATIME | OBD_MD_FLMTIME ; + struct md_attr *ma = &info->mti_attr; - if (!S_ISREG(attr->la_mode)) - b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV; + LASSERT(ma->ma_valid & MA_INODE); b->atime = attr->la_atime; b->mtime = attr->la_mtime; @@ -385,15 +416,42 @@ void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, b->nlink = attr->la_nlink; b->rdev = attr->la_rdev; + /*XXX should pack the reply body according to lu_valid*/ + b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID | + OBD_MD_FLGID | OBD_MD_FLTYPE | + OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS | + OBD_MD_FLATIME | OBD_MD_FLMTIME ; + + if (!S_ISREG(attr->la_mode)) { + b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV; + } else if (ma->ma_need & MA_LOV && ma->ma_lmm_size == 0) { + /* means no objects are allocated on osts. */ + LASSERT(!(ma->ma_valid & MA_LOV)); + /* just ignore blocks occupied by extend attributes on MDS */ + b->blocks = 0; + /* if no object is allocated on osts, the size on mds is valid. 
b=22272 */ + b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + } + if (fid) { b->fid1 = *fid; b->valid |= OBD_MD_FLID; - CDEBUG(D_INODE, ""DFID": nlink=%d, mode=%o, size="LPU64"\n", + + /* FIXME: these should be fixed when new igif ready.*/ + b->ino = fid_oid(fid); /* 1.6 compatibility */ + b->generation = fid_ver(fid); /* 1.6 compatibility */ + b->valid |= OBD_MD_FLGENER; /* 1.6 compatibility */ + + CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n", PFID(fid), b->nlink, b->mode, b->size); } if (info) mdt_body_reverse_idmap(info, b); + + if (b->valid & OBD_MD_FLSIZE) + CDEBUG(D_VFSTRACE, DFID": returning size %llu\n", + PFID(fid), b->size); } static inline int mdt_body_has_lov(const struct lu_attr *la, @@ -404,22 +462,21 @@ static inline int mdt_body_has_lov(const struct lu_attr *la, } static int mdt_getattr_internal(struct mdt_thread_info *info, - struct mdt_object *o) + struct mdt_object *o, int ma_need) { struct md_object *next = mdt_object_child(o); const struct mdt_body *reqbody = info->mti_body; struct ptlrpc_request *req = mdt_info_req(info); - struct mdt_export_data *med = &req->rq_export->exp_mdt_data; struct md_attr *ma = &info->mti_attr; struct lu_attr *la = &ma->ma_attr; - struct req_capsule *pill = &info->mti_pill; + struct req_capsule *pill = info->mti_pill; const struct lu_env *env = info->mti_env; struct mdt_body *repbody; struct lu_buf *buffer = &info->mti_buf; int rc; ENTRY; - if (unlikely(MDT_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))) + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) RETURN(err_serious(-ENOMEM)); repbody = req_capsule_server_get(pill, &RMF_MDT_BODY); @@ -450,6 +507,16 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, ma->ma_need = MA_LOV | MA_INODE; } + if (S_ISDIR(lu_object_attr(&next->mo_lu)) && + reqbody->valid & OBD_MD_FLDIREA && + lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) { + /* get default stripe info for this dir. 
*/ + ma->ma_need |= MA_LOV_DEF; + } + ma->ma_need |= ma_need; + if (ma->ma_need & MA_SOM) + ma->ma_som = &info->mti_u.som.data; + rc = mo_attr_get(env, next, ma); if (unlikely(rc)) { CERROR("getattr error for "DFID": %d\n", @@ -477,9 +544,6 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, repbody->eadatasize = ma->ma_lmv_size; repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA); } - if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) { - repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; - } } else if (S_ISLNK(la->la_mode) && reqbody->valid & OBD_MD_LINKNAME) { buffer->lb_buf = ma->ma_lmm; @@ -489,6 +553,8 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, CERROR("readlink failed: %d\n", rc); rc = -EFAULT; } else { + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO)) + rc -= 2; repbody->valid |= OBD_MD_LINKNAME; repbody->eadatasize = rc; /* NULL terminate */ @@ -508,7 +574,8 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, repbody->max_cookiesize); } - if (med->med_rmtclient && (reqbody->valid & OBD_MD_FLRMTPERM)) { + if (exp_connect_rmtclient(info->mti_exp) && + reqbody->valid & OBD_MD_FLRMTPERM) { void *buf = req_capsule_server_get(pill, &RMF_ACL); /* mdt_getattr_lock only */ @@ -550,11 +617,12 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, } #endif - if ((reqbody->valid & OBD_MD_FLMDSCAPA) && - info->mti_mdt->mdt_opts.mo_mds_capa) { + if (reqbody->valid & OBD_MD_FLMDSCAPA && + info->mti_mdt->mdt_opts.mo_mds_capa && + info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) { struct lustre_capa *capa; - capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA1); + capa = req_capsule_server_get(pill, &RMF_CAPA1); LASSERT(capa); capa->lc_opc = CAPA_OPC_MDS_DEFAULT; rc = mo_capa_get(env, next, capa, 0); @@ -567,7 +635,6 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, static int mdt_renew_capa(struct mdt_thread_info *info) { - struct mdt_device *mdt = info->mti_mdt; struct mdt_object *obj = info->mti_object; struct mdt_body *body; struct lustre_capa *capa, *c; @@ -578,30 +645,30 @@ static int mdt_renew_capa(struct mdt_thread_info *info) * return directly, client will find body->valid OBD_MD_FLOSSCAPA * flag not set. 
*/ - if (!obj || !mdt->mdt_opts.mo_mds_capa) + if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa || + !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA)) RETURN(0); - body = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); LASSERT(body != NULL); - c = req_capsule_client_get(&info->mti_pill, &RMF_CAPA1); + c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1); LASSERT(c); - capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA1); + capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2); LASSERT(capa); *capa = *c; rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1); if (rc == 0) body->valid |= OBD_MD_FLOSSCAPA; - RETURN(rc); } static int mdt_getattr(struct mdt_thread_info *info) { struct mdt_object *obj = info->mti_object; - struct req_capsule *pill = &info->mti_pill; + struct req_capsule *pill = info->mti_pill; struct mdt_body *reqbody; struct mdt_body *repbody; mode_t mode; @@ -613,14 +680,11 @@ static int mdt_getattr(struct mdt_thread_info *info) LASSERT(reqbody); if (reqbody->valid & OBD_MD_FLOSSCAPA) { - rc = req_capsule_pack(pill); + rc = req_capsule_server_pack(pill); if (unlikely(rc)) - rc = err_serious(rc); - else { - rc = mdt_renew_capa(info); - mdt_shrink_reply(info); - } - GOTO(out, rc); + RETURN(err_serious(rc)); + rc = mdt_renew_capa(info); + GOTO(out_shrink, rc); } LASSERT(obj != NULL); @@ -635,9 +699,9 @@ static int mdt_getattr(struct mdt_thread_info *info) req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size); - rc = req_capsule_pack(pill); + rc = req_capsule_server_pack(pill); if (unlikely(rc != 0)) - GOTO(out, rc = err_serious(rc)); + RETURN(err_serious(rc)); repbody = req_capsule_server_get(pill, &RMF_MDT_BODY); LASSERT(repbody != NULL); @@ -659,20 +723,19 @@ static int mdt_getattr(struct mdt_thread_info *info) * remote obj, and at that time no capability is available. 
*/ mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA); - rc = mdt_getattr_internal(info, obj); + rc = mdt_getattr_internal(info, obj, 0); if (reqbody->valid & OBD_MD_FLRMTPERM) mdt_exit_ucred(info); EXIT; out_shrink: mdt_shrink_reply(info); -out: return rc; } static int mdt_is_subdir(struct mdt_thread_info *info) { struct mdt_object *o = info->mti_object; - struct req_capsule *pill = &info->mti_pill; + struct req_capsule *pill = info->mti_pill; const struct mdt_body *body = info->mti_body; struct mdt_body *repbody; int rc; @@ -725,11 +788,10 @@ static int mdt_raw_lookup(struct mdt_thread_info *info, } else mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS); - repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); #endif if (rc == 0) { - repbody = req_capsule_server_get(&info->mti_pill, - &RMF_MDT_BODY); + repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); repbody->fid1 = *child_fid; repbody->valid = OBD_MD_FLID; } @@ -754,12 +816,13 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, struct md_object *next = mdt_object_child(parent); struct lu_fid *child_fid = &info->mti_tmp_fid1; struct lu_name *lname = NULL; - const char *name; + const char *name = NULL; int namelen = 0; struct mdt_lock_handle *lhp; struct ldlm_lock *lock; struct ldlm_res_id *res_id; int is_resent; + int ma_need = 0; int rc; ENTRY; @@ -769,33 +832,36 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)); LASSERT(parent != NULL); - name = req_capsule_client_get(&info->mti_pill, &RMF_NAME); + name = req_capsule_client_get(info->mti_pill, &RMF_NAME); if (name == NULL) RETURN(err_serious(-EFAULT)); - namelen = req_capsule_get_size(&info->mti_pill, &RMF_NAME, + namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME, RCL_CLIENT) - 1; - LASSERT(namelen >= 0); - - /* XXX: "namelen == 0" is for getattr by fid (OBD_CONNECT_ATTRFID), - * otherwise do not allow empty name, that is the name must contain - * at least one character and the terminating '\0'*/ - if (namelen == 0) { - reqbody =req_capsule_client_get(&info->mti_pill, &RMF_MDT_BODY); - LASSERT(fid_is_sane(&reqbody->fid2)); - name = NULL; - - CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", " - "ldlm_rep = %p\n", - PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2), - ldlm_rep); - } else { - lname = mdt_name(info->mti_env, (char *)name, namelen); - CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, " - "ldlm_rep = %p\n", - PFID(mdt_object_fid(parent)), name, ldlm_rep); - } + if (!info->mti_cross_ref) { + /* + * XXX: Check for "namelen == 0" is for getattr by fid + * (OBD_CONNECT_ATTRFID), otherwise do not allow empty name, + * that is the name must contain at least one character and + * the terminating '\0' + */ + if (namelen == 0) { + reqbody = req_capsule_client_get(info->mti_pill, + &RMF_MDT_BODY); + LASSERT(fid_is_sane(&reqbody->fid2)); + name = NULL; + CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", " + "ldlm_rep = %p\n", + PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2), + ldlm_rep); + } else { + lname = mdt_name(info->mti_env, (char *)name, namelen); + CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, " + "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)), + name, ldlm_rep); + } + } mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD); rc = mdt_object_exists(parent); @@ -804,10 +870,10 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, &parent->mot_obj.mo_lu, "Parent 
doesn't exist!\n"); RETURN(-ESTALE); - } else + } else if (!info->mti_cross_ref) { LASSERTF(rc > 0, "Parent "DFID" is on remote server\n", PFID(mdt_object_fid(parent))); - + } if (lname) { rc = mdt_raw_lookup(info, parent, lname, ldlm_rep); if (rc != 0) { @@ -851,11 +917,11 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, /* Finally, we can get attr for child. */ mdt_set_capainfo(info, 0, mdt_object_fid(child), BYPASS_CAPA); - rc = mdt_getattr_internal(info, child); + rc = mdt_getattr_internal(info, child, 0); if (unlikely(rc != 0)) mdt_object_unlock(info, child, lhc, 1); } - GOTO(out, rc); + RETURN(rc); } /* step 1: lock parent */ @@ -918,7 +984,11 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, LDLM_LOCK_PUT(lock); rc = 0; } else { + struct md_attr *ma; relock: + ma = &info->mti_attr; + + OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2); mdt_lock_handle_init(lhc); mdt_lock_reg_init(lhc, LCK_PR); @@ -926,7 +996,25 @@ relock: LU_OBJECT_DEBUG(D_WARNING, info->mti_env, &child->mot_obj.mo_lu, "Object doesn't exist!\n"); + GOTO(out_child, rc = -ESTALE); } + + ma->ma_valid = 0; + ma->ma_need = MA_INODE; + rc = mo_attr_get(info->mti_env, next, ma); + if (unlikely(rc != 0)) + GOTO(out_child, rc); + + /* If the file has not been changed for some time, we return + * not only a LOOKUP lock, but also an UPDATE lock and this + * might save us RPC on later STAT. For directories, it also + * let negative dentry starts working for this dir. */ + if (ma->ma_valid & MA_INODE && + ma->ma_attr.la_valid & LA_CTIME && + info->mti_mdt->mdt_namespace->ns_ctime_age_limit + + ma->ma_attr.la_ctime < cfs_time_current_sec()) + child_bits |= MDS_INODELOCK_UPDATE; + rc = mdt_object_lock(info, child, lhc, child_bits, MDT_CROSS_LOCK); @@ -934,46 +1022,39 @@ relock: GOTO(out_child, rc); } + lock = ldlm_handle2lock(&lhc->mlh_reg_lh); + /* Get MA_SOM attributes if update lock is given. */ + if (lock && + lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE && + S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu))) + ma_need = MA_SOM; + /* finally, we can get attr for child. */ mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA); - rc = mdt_getattr_internal(info, child); + rc = mdt_getattr_internal(info, child, ma_need); if (unlikely(rc != 0)) { mdt_object_unlock(info, child, lhc, 1); - } else { - lock = ldlm_handle2lock(&lhc->mlh_reg_lh); - if (lock) { - struct mdt_body *repbody; - struct lu_attr *ma; - - /* Debugging code. */ - res_id = &lock->l_resource->lr_name; - LDLM_DEBUG(lock, "Returning lock to client\n"); - LASSERTF(fid_res_name_eq(mdt_object_fid(child), - &lock->l_resource->lr_name), - "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n", - (unsigned long)res_id->name[0], - (unsigned long)res_id->name[1], - (unsigned long)res_id->name[2], - PFID(mdt_object_fid(child))); - /* - * Pack Size-on-MDS inode attributes to the body if - * update lock is given. - */ - repbody = req_capsule_server_get(&info->mti_pill, - &RMF_MDT_BODY); - ma = &info->mti_attr.ma_attr; - if (lock->l_policy_data.l_inodebits.bits & - MDS_INODELOCK_UPDATE) - mdt_pack_size2body(info, child); - LDLM_LOCK_PUT(lock); - } - } + } else if (lock) { + /* Debugging code. 
*/ + res_id = &lock->l_resource->lr_name; + LDLM_DEBUG(lock, "Returning lock to client"); + LASSERTF(fid_res_name_eq(mdt_object_fid(child), + &lock->l_resource->lr_name), + "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n", + (unsigned long)res_id->name[0], + (unsigned long)res_id->name[1], + (unsigned long)res_id->name[2], + PFID(mdt_object_fid(child))); + mdt_pack_size2body(info, child); + } + if (lock) + LDLM_LOCK_PUT(lock); + EXIT; out_child: mdt_object_put(info->mti_env, child); out_parent: mdt_object_unlock(info, parent, lhp, 1); -out: return rc; } @@ -986,9 +1067,9 @@ static int mdt_getattr_name(struct mdt_thread_info *info) int rc; ENTRY; - reqbody = req_capsule_client_get(&info->mti_pill, &RMF_MDT_BODY); + reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY); LASSERT(reqbody != NULL); - repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); LASSERT(repbody != NULL); info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT); @@ -998,7 +1079,7 @@ static int mdt_getattr_name(struct mdt_thread_info *info) rc = mdt_init_ucred(info, reqbody); if (unlikely(rc)) - GOTO(out, rc); + GOTO(out_shrink, rc); rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL); if (lustre_handle_is_used(&lhc->mlh_reg_lh)) { @@ -1007,59 +1088,82 @@ static int mdt_getattr_name(struct mdt_thread_info *info) } mdt_exit_ucred(info); EXIT; -out: +out_shrink: mdt_shrink_reply(info); return rc; } -static struct lu_device_operations mdt_lu_ops; +static const struct lu_device_operations mdt_lu_ops; static int lu_device_is_mdt(struct lu_device *d) { return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops); } +static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len, + void *karg, void *uarg); + static int mdt_set_info(struct mdt_thread_info *info) { struct ptlrpc_request *req = mdt_info_req(info); char *key; - __u32 *val; - int keylen, rc = 0; + void *val; + int keylen, vallen, rc = 0; ENTRY; - rc = lustre_pack_reply(req, 1, NULL, NULL); + rc = req_capsule_server_pack(info->mti_pill); if (rc) RETURN(rc); - key = req_capsule_client_get(&info->mti_pill, &RMF_SETINFO_KEY); + key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY); if (key == NULL) { DEBUG_REQ(D_HA, req, "no set_info key"); RETURN(-EFAULT); } - keylen = req_capsule_get_size(&info->mti_pill, &RMF_SETINFO_KEY, + keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY, RCL_CLIENT); - val = req_capsule_client_get(&info->mti_pill, &RMF_SETINFO_VAL); + val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL); if (val == NULL) { DEBUG_REQ(D_HA, req, "no set_info val"); RETURN(-EFAULT); } - if (keylen != (sizeof(KEY_READ_ONLY) - 1) || - memcmp(key, KEY_READ_ONLY, keylen) != 0) - RETURN(-EINVAL); + vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL, + RCL_CLIENT); - req->rq_status = 0; - lustre_msg_set_status(req->rq_repmsg, 0); + /* Swab any part of val you need to here */ + if (KEY_IS(KEY_READ_ONLY)) { + req->rq_status = 0; + lustre_msg_set_status(req->rq_repmsg, 0); - spin_lock(&req->rq_export->exp_lock); - if (*val) - req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY; - else - req->rq_export->exp_connect_flags &= ~OBD_CONNECT_RDONLY; - spin_unlock(&req->rq_export->exp_lock); + cfs_spin_lock(&req->rq_export->exp_lock); + if (*(__u32 *)val) + req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY; + else + req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY; + 
cfs_spin_unlock(&req->rq_export->exp_lock); + + } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) { + struct changelog_setinfo *cs = + (struct changelog_setinfo *)val; + if (vallen != sizeof(*cs)) { + CERROR("Bad changelog_clear setinfo size %d\n", vallen); + RETURN(-EINVAL); + } + if (ptlrpc_req_need_swab(req)) { + __swab64s(&cs->cs_recno); + __swab32s(&cs->cs_id); + } + rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp, + vallen, val, NULL); + lustre_msg_set_status(req->rq_repmsg, rc); + + } else { + RETURN(-EINVAL); + } RETURN(0); } @@ -1073,12 +1177,14 @@ static int mdt_connect(struct mdt_thread_info *info) if (rc == 0) { LASSERT(req->rq_export != NULL); info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev); - rc = mdt_init_idmap(info); + rc = mdt_init_sec_level(info); + if (rc == 0) + rc = mdt_init_idmap(info); if (rc != 0) - /* if mdt_init_idmap failed, revocation for connect */ obd_disconnect(class_export_get(req->rq_export)); - } else + } else { rc = err_serious(rc); + } return rc; } @@ -1097,10 +1203,12 @@ static int mdt_sendpage(struct mdt_thread_info *info, struct lu_rdpg *rdpg) { struct ptlrpc_request *req = mdt_info_req(info); + struct obd_export *exp = req->rq_export; struct ptlrpc_bulk_desc *desc; struct l_wait_info *lwi = &info->mti_u.rdpg.mti_wait_info; int tmpcount; int tmpsize; + int timeout; int i; int rc; ENTRY; @@ -1108,7 +1216,7 @@ static int mdt_sendpage(struct mdt_thread_info *info, desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE, MDS_BULK_PORTAL); if (desc == NULL) - GOTO(out, rc = -ENOMEM); + RETURN(-ENOMEM); for (i = 0, tmpcount = rdpg->rp_count; i < rdpg->rp_npages; i++, tmpcount -= tmpsize) { @@ -1117,39 +1225,55 @@ static int mdt_sendpage(struct mdt_thread_info *info, } LASSERT(desc->bd_nob == rdpg->rp_count); - rc = ptlrpc_start_bulk_transfer(desc); + rc = sptlrpc_svc_wrap_bulk(req, desc); if (rc) GOTO(free_desc, rc); - if (MDT_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) - GOTO(abort_bulk, rc); + rc = ptlrpc_start_bulk_transfer(desc); + if (rc) + GOTO(free_desc, rc); - *lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL); - rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), lwi); - LASSERT (rc == 0 || rc == -ETIMEDOUT); + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) + GOTO(abort_bulk, rc = 0); + + do { + timeout = (int) req->rq_deadline - cfs_time_current_sec(); + if (timeout < 0) + CERROR("Req deadline already passed %lu (now: %lu)\n", + req->rq_deadline, cfs_time_current_sec()); + *lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)), + cfs_time_seconds(1), NULL, NULL); + rc = l_wait_event(desc->bd_waitq, + !ptlrpc_server_bulk_active(desc) || + exp->exp_failed || + exp->exp_abort_active_req, lwi); + LASSERT (rc == 0 || rc == -ETIMEDOUT); + } while ((rc == -ETIMEDOUT) && + (req->rq_deadline > cfs_time_current_sec())); if (rc == 0) { if (desc->bd_success && desc->bd_nob_transferred == rdpg->rp_count) GOTO(free_desc, rc); - rc = -ETIMEDOUT; /* XXX should this be a different errno? */ + rc = -ETIMEDOUT; + if (exp->exp_abort_active_req || exp->exp_failed) + GOTO(abort_bulk, rc); } DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s", (rc == -ETIMEDOUT) ? 
"timeout" : "network error", desc->bd_nob_transferred, rdpg->rp_count, - req->rq_export->exp_client_uuid.uuid, - req->rq_export->exp_connection->c_remote_uuid.uuid); + exp->exp_client_uuid.uuid, + exp->exp_connection->c_remote_uuid.uuid); - class_fail_export(req->rq_export); + class_fail_export(exp); EXIT; abort_bulk: ptlrpc_abort_bulk(desc); free_desc: ptlrpc_free_bulk(desc); -out: return rc; } @@ -1187,7 +1311,7 @@ static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page, ma->ma_attr.la_valid = LA_MODE; ma->ma_valid = MA_INODE; - kmap(page); + cfs_kmap(page); dp = page_address(page); offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp); @@ -1200,7 +1324,7 @@ static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page, continue; fid_le_to_cpu(lf, &ent->lde_fid); - if (le32_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT) + if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT) ma->ma_attr.la_mode = S_IFDIR; else ma->ma_attr.la_mode = 0; @@ -1210,8 +1334,8 @@ static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page, memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen)); lname = mdt_name(info->mti_env, name, - le16_to_cpu(ent->lde_namelen) + 1); - ma->ma_attr_flags |= MDS_PERM_BYPASS; + le16_to_cpu(ent->lde_namelen)); + ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE); rc = mdo_name_insert(info->mti_env, md_object_next(&object->mot_obj), lname, lf, ma); @@ -1230,7 +1354,7 @@ static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page, } EXIT; out: - kunmap(page); + cfs_kunmap(page); return rc; } @@ -1254,7 +1378,7 @@ static int mdt_writepage(struct mdt_thread_info *info) ENTRY; - reqbody = req_capsule_client_get(&info->mti_pill, &RMF_MDT_BODY); + reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY); if (reqbody == NULL) RETURN(err_serious(-EFAULT)); @@ -1273,6 +1397,9 @@ static int mdt_writepage(struct mdt_thread_info *info) ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size, (int)reqbody->nlink); + rc = sptlrpc_svc_prep_bulk(req, desc); + if (rc != 0) + GOTO(cleanup_page, rc); /* * Check if client was evicted while we were doing i/o before touching * network. @@ -1286,7 +1413,7 @@ static int mdt_writepage(struct mdt_thread_info *info) else rc = ptlrpc_start_bulk_transfer (desc); if (rc == 0) { - *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ, + *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ, mdt_bulk_timeout, desc); rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) || desc->bd_export->exp_failed, lwi); @@ -1317,7 +1444,7 @@ static int mdt_writepage(struct mdt_thread_info *info) cleanup_lwi: OBD_FREE_PTR(lwi); cleanup_page: - __cfs_free_page(page); + cfs_free_page(page); desc_cleanup: ptlrpc_free_bulk(desc); RETURN(rc); @@ -1334,29 +1461,27 @@ static int mdt_readpage(struct mdt_thread_info *info) int i; ENTRY; - if (MDT_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) RETURN(err_serious(-ENOMEM)); - reqbody = req_capsule_client_get(&info->mti_pill, &RMF_MDT_BODY); - repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY); + repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); if (reqbody == NULL || repbody == NULL) RETURN(err_serious(-EFAULT)); - rc = mdt_check_ucred(info); - if (rc) - RETURN(err_serious(rc)); - /* * prepare @rdpg before calling lower layers and transfer itself. 
Here * reqbody->size contains offset of where to start to read and * reqbody->nlink contains number bytes to read. */ rdpg->rp_hash = reqbody->size; - if ((__u64)rdpg->rp_hash != reqbody->size) { - CERROR("Invalid hash: %#llx != %#llx\n", - (__u64)rdpg->rp_hash, reqbody->size); + if (rdpg->rp_hash != reqbody->size) { + CERROR("Invalid hash: "LPX64" != "LPX64"\n", + rdpg->rp_hash, reqbody->size); RETURN(-EFAULT); } + + rdpg->rp_attrs = reqbody->mode; rdpg->rp_count = reqbody->nlink; rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT; OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]); @@ -1382,10 +1507,11 @@ free_rdpg: for (i = 0; i < rdpg->rp_npages; i++) if (rdpg->rp_pages[i] != NULL) - __cfs_free_page(rdpg->rp_pages[i]); + cfs_free_page(rdpg->rp_pages[i]); OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]); - MDT_FAIL_RETURN(OBD_FAIL_MDS_SENDPAGE, 0); + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) + RETURN(0); return rc; } @@ -1394,25 +1520,22 @@ static int mdt_reint_internal(struct mdt_thread_info *info, struct mdt_lock_handle *lhc, __u32 op) { - struct req_capsule *pill = &info->mti_pill; + struct req_capsule *pill = info->mti_pill; struct mdt_device *mdt = info->mti_mdt; + struct md_quota *mq = md_quota(info->mti_env); struct mdt_body *repbody; - int need_shrink = 0; - int rc; + int rc = 0; ENTRY; /* pack reply */ - if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) { + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, mdt->mdt_max_mdsize); - need_shrink = 1; - } - if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) { + if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, mdt->mdt_max_cookiesize); - need_shrink = 1; - } - rc = req_capsule_pack(pill); + + rc = req_capsule_server_pack(pill); if (rc != 0) { CERROR("Can't pack response, rc %d\n", rc); RETURN(err_serious(rc)); @@ -1425,7 +1548,7 @@ static int mdt_reint_internal(struct mdt_thread_info *info, repbody->aclsize = 0; } - if (MDT_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) GOTO(out_shrink, rc = err_serious(-EFAULT)); rc = mdt_reint_unpack(info, op); @@ -1434,6 +1557,18 @@ static int mdt_reint_internal(struct mdt_thread_info *info, GOTO(out_shrink, rc = err_serious(rc)); } + OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10); + + /* for replay no cookkie / lmm need, because client have this already */ + if (info->mti_spec.no_create == 1) { + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0); + + if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, + 0); + } + rc = mdt_init_ucred_reint(info); if (rc) GOTO(out_shrink, rc); @@ -1442,35 +1577,33 @@ static int mdt_reint_internal(struct mdt_thread_info *info, if (rc != 0) GOTO(out_ucred, rc = err_serious(rc)); - need_shrink = 0; if (mdt_check_resent(info, mdt_reconstruct, lhc)) { rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg); GOTO(out_ucred, rc); } - + mq->mq_exp = info->mti_exp; rc = mdt_reint_rec(info, lhc); EXIT; out_ucred: mdt_exit_ucred(info); out_shrink: - if (need_shrink) - mdt_shrink_reply(info); + mdt_shrink_reply(info); return rc; } static long mdt_reint_opcode(struct mdt_thread_info *info, const struct req_format **fmt) { - __u32 *ptr; + struct mdt_rec_reint *rec; long opc; opc = err_serious(-EFAULT); 
- ptr = req_capsule_client_get(&info->mti_pill, &RMF_REINT_OPC); - if (ptr != NULL) { - opc = *ptr; + rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT); + if (rec != NULL) { + opc = rec->rr_opcode; DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opt = %ld", opc); if (opc < REINT_MAX && fmt[opc] != NULL) - req_capsule_extend(&info->mti_pill, fmt[opc]); + req_capsule_extend(info->mti_pill, fmt[opc]); else { CERROR("Unsupported opc: %ld\n", opc); opc = err_serious(opc); @@ -1485,12 +1618,13 @@ static int mdt_reint(struct mdt_thread_info *info) int rc; static const struct req_format *reint_fmts[REINT_MAX] = { - [REINT_SETATTR] = &RQF_MDS_REINT_SETATTR, - [REINT_CREATE] = &RQF_MDS_REINT_CREATE, - [REINT_LINK] = &RQF_MDS_REINT_LINK, - [REINT_UNLINK] = &RQF_MDS_REINT_UNLINK, - [REINT_RENAME] = &RQF_MDS_REINT_RENAME, - [REINT_OPEN] = &RQF_MDS_REINT_OPEN + [REINT_SETATTR] = &RQF_MDS_REINT_SETATTR, + [REINT_CREATE] = &RQF_MDS_REINT_CREATE, + [REINT_LINK] = &RQF_MDS_REINT_LINK, + [REINT_UNLINK] = &RQF_MDS_REINT_UNLINK, + [REINT_RENAME] = &RQF_MDS_REINT_RENAME, + [REINT_OPEN] = &RQF_MDS_REINT_OPEN, + [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR }; ENTRY; @@ -1510,23 +1644,38 @@ static int mdt_reint(struct mdt_thread_info *info) RETURN(rc); } -/* TODO these two methods not available now. */ - /* this should sync the whole device */ -static int mdt_device_sync(struct mdt_thread_info *info) +static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) { - return 0; + struct dt_device *dt = mdt->mdt_bottom; + int rc; + ENTRY; + + rc = dt->dd_ops->dt_sync(env, dt); + RETURN(rc); } /* this should sync this object */ static int mdt_object_sync(struct mdt_thread_info *info) { - return 0; + struct md_object *next; + int rc; + ENTRY; + + if (!mdt_object_exists(info->mti_object)) { + CWARN("Non existing object "DFID"!\n", + PFID(mdt_object_fid(info->mti_object))); + RETURN(-ESTALE); + } + next = mdt_object_child(info->mti_object); + rc = mo_object_sync(info->mti_env, next); + + RETURN(rc); } static int mdt_sync(struct mdt_thread_info *info) { - struct req_capsule *pill = &info->mti_pill; + struct req_capsule *pill = info->mti_pill; struct mdt_body *body; int rc; ENTRY; @@ -1538,14 +1687,14 @@ static int mdt_sync(struct mdt_thread_info *info) if (body == NULL) RETURN(err_serious(-EINVAL)); - if (MDT_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) + if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) RETURN(err_serious(-ENOMEM)); if (fid_seq(&body->fid1) == 0) { /* sync the whole device */ - rc = req_capsule_pack(pill); + rc = req_capsule_server_pack(pill); if (rc == 0) - rc = mdt_device_sync(info); + rc = mdt_device_sync(info->mti_env, info->mti_mdt); else rc = err_serious(rc); } else { @@ -1576,15 +1725,133 @@ static int mdt_sync(struct mdt_thread_info *info) RETURN(rc); } +#ifdef HAVE_QUOTA_SUPPORT static int mdt_quotacheck_handle(struct mdt_thread_info *info) { - return err_serious(-EOPNOTSUPP); + struct obd_quotactl *oqctl; + struct req_capsule *pill = info->mti_pill; + struct obd_export *exp = info->mti_exp; + struct md_quota *mq = md_quota(info->mti_env); + struct md_device *next = info->mti_mdt->mdt_child; + int rc; + ENTRY; + + oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL); + if (oqctl == NULL) + RETURN(-EPROTO); + + /* remote client has no permission for quotacheck */ + if (unlikely(exp_connect_rmtclient(exp))) + RETURN(-EPERM); + + rc = req_capsule_server_pack(pill); + if (rc) + RETURN(rc); + + mq->mq_exp = exp; + rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next, + 
oqctl->qc_type); + RETURN(rc); } static int mdt_quotactl_handle(struct mdt_thread_info *info) { - return err_serious(-EOPNOTSUPP); + struct obd_quotactl *oqctl, *repoqc; + struct req_capsule *pill = info->mti_pill; + struct obd_export *exp = info->mti_exp; + struct md_quota *mq = md_quota(info->mti_env); + struct md_device *next = info->mti_mdt->mdt_child; + const struct md_quota_operations *mqo = &next->md_ops->mdo_quota; + int id, rc; + ENTRY; + + oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL); + if (oqctl == NULL) + RETURN(-EPROTO); + + id = oqctl->qc_id; + if (exp_connect_rmtclient(exp)) { + struct ptlrpc_request *req = mdt_info_req(info); + struct mdt_export_data *med = mdt_req2med(req); + struct lustre_idmap_table *idmap = med->med_idmap; + + if (unlikely(oqctl->qc_cmd != Q_GETQUOTA && + oqctl->qc_cmd != Q_GETINFO)) + RETURN(-EPERM); + + + if (oqctl->qc_type == USRQUOTA) + id = lustre_idmap_lookup_uid(NULL, idmap, 0, + oqctl->qc_id); + else if (oqctl->qc_type == GRPQUOTA) + id = lustre_idmap_lookup_gid(NULL, idmap, 0, + oqctl->qc_id); + else + RETURN(-EINVAL); + + if (id == CFS_IDMAP_NOTFOUND) { + CDEBUG(D_QUOTA, "no mapping for id %u\n", + oqctl->qc_id); + RETURN(-EACCES); + } + } + + rc = req_capsule_server_pack(pill); + if (rc) + RETURN(rc); + + repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL); + LASSERT(repoqc != NULL); + + mq->mq_exp = exp; + switch (oqctl->qc_cmd) { + case Q_QUOTAON: + rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type); + break; + case Q_QUOTAOFF: + rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type); + break; + case Q_SETINFO: + rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id, + &oqctl->qc_dqinfo); + break; + case Q_GETINFO: + rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id, + &oqctl->qc_dqinfo); + break; + case Q_SETQUOTA: + rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id, + &oqctl->qc_dqblk); + break; + case Q_GETQUOTA: + rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id, + &oqctl->qc_dqblk); + break; + case Q_GETOINFO: + rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id, + &oqctl->qc_dqinfo); + break; + case Q_GETOQUOTA: + rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id, + &oqctl->qc_dqblk); + break; + case LUSTRE_Q_INVALIDATE: + rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type); + break; + case LUSTRE_Q_FINVALIDATE: + rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type); + break; + default: + CERROR("unsupported mdt_quotactl command: %d\n", + oqctl->qc_cmd); + RETURN(-EFAULT); + } + + *repoqc = *oqctl; + RETURN(rc); } +#endif + /* * OBD PING and other handlers. @@ -1593,6 +1860,9 @@ static int mdt_obd_ping(struct mdt_thread_info *info) { int rc; ENTRY; + + req_capsule_set(info->mti_pill, &RQF_OBD_PING); + rc = target_handle_ping(mdt_info_req(info)); if (rc < 0) rc = err_serious(rc); @@ -1611,6 +1881,101 @@ static int mdt_obd_qc_callback(struct mdt_thread_info *info) /* + * LLOG handlers. + */ + +/** clone llog ctxt from child (mdd) + * This allows remote llog (replicator) access. + * We can either pass all llog RPCs (eg mdt_llog_create) on to child where the + * context was originally set up, or we can handle them directly. + * I choose the latter, but that means I need any llog + * contexts set up by child to be accessable by the mdt. So we clone the + * context into our context list here. 
+ */ +static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt, + int idx) +{ + struct md_device *next = mdt->mdt_child; + struct llog_ctxt *ctxt; + int rc; + + if (!llog_ctxt_null(mdt2obd_dev(mdt), idx)) + return 0; + + rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt); + if (rc || ctxt == NULL) { + CERROR("Can't get mdd ctxt %d\n", rc); + return rc; + } + + rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx); + if (rc) + CERROR("Can't set mdt ctxt %d\n", rc); + + return rc; +} + +static int mdt_llog_ctxt_unclone(const struct lu_env *env, + struct mdt_device *mdt, int idx) +{ + struct llog_ctxt *ctxt; + + ctxt = llog_get_context(mdt2obd_dev(mdt), idx); + if (ctxt == NULL) + return 0; + /* Put once for the get we just did, and once for the clone */ + llog_ctxt_put(ctxt); + llog_ctxt_put(ctxt); + return 0; +} + +static int mdt_llog_create(struct mdt_thread_info *info) +{ + int rc; + + req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE); + rc = llog_origin_handle_create(mdt_info_req(info)); + return (rc < 0 ? err_serious(rc) : rc); +} + +static int mdt_llog_destroy(struct mdt_thread_info *info) +{ + int rc; + + req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY); + rc = llog_origin_handle_destroy(mdt_info_req(info)); + return (rc < 0 ? err_serious(rc) : rc); +} + +static int mdt_llog_read_header(struct mdt_thread_info *info) +{ + int rc; + + req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER); + rc = llog_origin_handle_read_header(mdt_info_req(info)); + return (rc < 0 ? err_serious(rc) : rc); +} + +static int mdt_llog_next_block(struct mdt_thread_info *info) +{ + int rc; + + req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK); + rc = llog_origin_handle_next_block(mdt_info_req(info)); + return (rc < 0 ? err_serious(rc) : rc); +} + +static int mdt_llog_prev_block(struct mdt_thread_info *info) +{ + int rc; + + req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK); + rc = llog_origin_handle_prev_block(mdt_info_req(info)); + return (rc < 0 ? err_serious(rc) : rc); +} + + +/* * DLM handlers. */ static struct ldlm_callback_suite cbs = { @@ -1622,7 +1987,6 @@ static struct ldlm_callback_suite cbs = { static int mdt_enqueue(struct mdt_thread_info *info) { struct ptlrpc_request *req; - __u64 req_bits; int rc; /* @@ -1631,21 +1995,7 @@ static int mdt_enqueue(struct mdt_thread_info *info) */ LASSERT(info->mti_dlm_req != NULL); - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE)) { - info->mti_fail_id = OBD_FAIL_LDLM_ENQUEUE; - return 0; - } - req = mdt_info_req(info); - - /* - * Lock without inodebits makes no sense and will oops later in - * ldlm. Let's check it now to see if we have wrong lock from client or - * bits get corrupted somewhere in mdt_intent_policy(). 
- */ - req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits; - LASSERT(req_bits != 0); - rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace, req, info->mti_dlm_req, &cbs); info->mti_fail_id = OBD_FAIL_LDLM_REPLY; @@ -1695,6 +2045,8 @@ static int mdt_sec_ctx_handle(struct mdt_thread_info *info) sptlrpc_svc_ctx_invalidate(req); } + OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val); + return rc; } @@ -1712,7 +2064,8 @@ struct mdt_object *mdt_object_find(const struct lu_env *env, struct mdt_object *m; ENTRY; - o = lu_object_find(env, d->mdt_md_dev.md_lu_dev.ld_site, f); + CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f)); + o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL); if (unlikely(IS_ERR(o))) m = (struct mdt_object *)o; else @@ -1720,10 +2073,112 @@ struct mdt_object *mdt_object_find(const struct lu_env *env, RETURN(m); } -int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o, - struct mdt_lock_handle *lh, __u64 ibits, int locality) -{ - struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace; +/** + * Asyncronous commit for mdt device. + * + * Pass asynchonous commit call down the MDS stack. + * + * \param env environment + * \param mdt the mdt device + */ +static void mdt_device_commit_async(const struct lu_env *env, + struct mdt_device *mdt) +{ + struct dt_device *dt = mdt->mdt_bottom; + int rc; + + rc = dt->dd_ops->dt_commit_async(env, dt); + if (unlikely(rc != 0)) + CWARN("async commit start failed with rc = %d", rc); +} + +/** + * Mark the lock as "synchonous". + * + * Mark the lock to deffer transaction commit to the unlock time. + * + * \param lock the lock to mark as "synchonous" + * + * \see mdt_is_lock_sync + * \see mdt_save_lock + */ +static inline void mdt_set_lock_sync(struct ldlm_lock *lock) +{ + lock->l_ast_data = (void*)1; +} + +/** + * Check whehter the lock "synchonous" or not. + * + * \param lock the lock to check + * \retval 1 the lock is "synchonous" + * \retval 0 the lock isn't "synchronous" + * + * \see mdt_set_lock_sync + * \see mdt_save_lock + */ +static inline int mdt_is_lock_sync(struct ldlm_lock *lock) +{ + return lock->l_ast_data != NULL; +} + +/** + * Blocking AST for mdt locks. + * + * Starts transaction commit if in case of COS lock conflict or + * deffers such a commit to the mdt_save_lock. 
+ * + * \param lock the lock which blocks a request or cancelling lock + * \param desc unused + * \param data unused + * \param flag indicates whether this cancelling or blocking callback + * \retval 0 + * \see ldlm_blocking_ast_nocheck + */ +int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, + void *data, int flag) +{ + struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd; + struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev); + int rc; + ENTRY; + + if (flag == LDLM_CB_CANCELING) + RETURN(0); + lock_res_and_lock(lock); + if (lock->l_blocking_ast != mdt_blocking_ast) { + unlock_res_and_lock(lock); + RETURN(0); + } + if (mdt_cos_is_enabled(mdt) && + lock->l_req_mode & (LCK_PW | LCK_EX) && + lock->l_blocking_lock != NULL && + lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) { + mdt_set_lock_sync(lock); + } + rc = ldlm_blocking_ast_nocheck(lock); + + /* There is no lock conflict if l_blocking_lock == NULL, + * it indicates a blocking ast sent from ldlm_lock_decref_internal + * when the last reference to a local lock was released */ + if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) { + struct lu_env env; + + rc = lu_env_init(&env, LCT_MD_THREAD); + if (unlikely(rc != 0)) + CWARN("lu_env initialization failed with rc = %d," + "cannot start asynchronous commit\n", rc); + else + mdt_device_commit_async(&env, mdt); + lu_env_fini(&env); + } + RETURN(rc); +} + +int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o, + struct mdt_lock_handle *lh, __u64 ibits, int locality) +{ + struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace; ldlm_policy_data_t *policy = &info->mti_policy; struct ldlm_res_id *res_id = &info->mti_res_id; int rc; @@ -1747,6 +2202,17 @@ int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o, LASSERT(lh->mlh_type != MDT_PDO_LOCK); } + if (lh->mlh_type == MDT_PDO_LOCK) { + /* check for exists after object is locked */ + if (mdt_object_exists(o) == 0) { + /* Non-existent object shouldn't have PDO lock */ + RETURN(-ESTALE); + } else { + /* Non-dir object shouldn't have PDO lock */ + LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu))); + } + } + memset(policy, 0, sizeof(*policy)); fid_build_reg_res_name(mdt_object_fid(o), res_id); @@ -1765,13 +2231,14 @@ int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o, */ policy->l_inodebits.bits = MDS_INODELOCK_UPDATE; rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, - policy, res_id, LDLM_FL_ATOMIC_CB); + policy, res_id, LDLM_FL_ATOMIC_CB, + &info->mti_exp->exp_handle.h_cookie); if (unlikely(rc)) RETURN(rc); } /* - * Finish res_id initializing by name hash marking patr of + * Finish res_id initializing by name hash marking part of * directory which is taking modification. */ res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash; @@ -1782,60 +2249,95 @@ int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o, /* * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is * going to be sent to client. If it is - mdt_intent_policy() path will - * fix it up and turns FL_LOCAL flag off. + * fix it up and turn FL_LOCAL flag off. 
*/ rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy, - res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB); - + res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB, + &info->mti_exp->exp_handle.h_cookie); if (rc) - GOTO(out, rc); + mdt_object_unlock(info, o, lh, 1); + else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) && + lh->mlh_pdo_hash != 0 && + (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) { + OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 10); + } - if (lh->mlh_type == MDT_PDO_LOCK) { - /* check for exists after object is locked */ - if (mdt_object_exists(o) == 0) { - /* Non-existent object shouldn't have PDO lock */ - rc = -ESTALE; + RETURN(rc); +} + +/** + * Save a lock within request object. + * + * Keep the lock referenced until whether client ACK or transaction + * commit happens or release the lock immediately depending on input + * parameters. If COS is ON, a write lock is converted to COS lock + * before saving. + * + * \param info thead info object + * \param h lock handle + * \param mode lock mode + * \param decref force immediate lock releasing + */ +static +void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h, + ldlm_mode_t mode, int decref) +{ + ENTRY; + + if (lustre_handle_is_used(h)) { + if (decref || !info->mti_has_trans || + !(mode & (LCK_PW | LCK_EX))){ + mdt_fid_unlock(h, mode); } else { - /* Non-dir object shouldn't have PDO lock */ - LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu))); + struct mdt_device *mdt = info->mti_mdt; + struct ldlm_lock *lock = ldlm_handle2lock(h); + struct ptlrpc_request *req = mdt_info_req(info); + int no_ack = 0; + + LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n", + h->cookie); + CDEBUG(D_HA, "request = %p reply state = %p" + " transno = "LPD64"\n", + req, req->rq_reply_state, req->rq_transno); + if (mdt_cos_is_enabled(mdt)) { + no_ack = 1; + ldlm_lock_downgrade(lock, LCK_COS); + mode = LCK_COS; + } + ptlrpc_save_lock(req, h, mode, no_ack); + if (mdt_is_lock_sync(lock)) { + CDEBUG(D_HA, "found sync-lock," + " async commit started\n"); + mdt_device_commit_async(info->mti_env, + mdt); + } + LDLM_LOCK_PUT(lock); } + h->cookie = 0ull; } -out: - if (rc) - mdt_object_unlock(info, o, lh, 1); - - RETURN(rc); + EXIT; } -/* - * Just call ldlm_lock_decref() if decref, else we only call ptlrpc_save_lock() - * to save this lock in req. when transaction committed, req will be released, - * and lock will, too. +/** + * Unlock mdt object. + * + * Immeditely release the regular lock and the PDO lock or save the + * lock in reqeuest and keep them referenced until client ACK or + * transaction commit. + * + * \param info thread info object + * \param o mdt object + * \param lh mdt lock handle referencing regular and PDO locks + * \param decref force immediate lock releasing */ void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o, struct mdt_lock_handle *lh, int decref) { - struct ptlrpc_request *req = mdt_info_req(info); ENTRY; - if (lustre_handle_is_used(&lh->mlh_pdo_lh)) { - /* Do not save PDO locks to request, just decref. 
*/ - mdt_fid_unlock(&lh->mlh_pdo_lh, - lh->mlh_pdo_mode); - lh->mlh_pdo_lh.cookie = 0ull; - } - - if (lustre_handle_is_used(&lh->mlh_reg_lh)) { - if (decref) { - mdt_fid_unlock(&lh->mlh_reg_lh, - lh->mlh_reg_mode); - } else { - ptlrpc_save_lock(req, &lh->mlh_reg_lh, - lh->mlh_reg_mode); - } - lh->mlh_reg_lh.cookie = 0ull; - } + mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref); + mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref); EXIT; } @@ -1881,7 +2383,9 @@ static struct mdt_handler *mdt_handler_find(__u32 opc, if (s->mos_opc_start <= opc && opc < s->mos_opc_end) { h = s->mos_hs + (opc - s->mos_opc_start); if (likely(h->mh_opc != 0)) - LASSERT(h->mh_opc == opc); + LASSERTF(h->mh_opc == opc, + "opcode mismatch %d != %d\n", + h->mh_opc, opc); else h = NULL; /* unsupported opc */ break; @@ -1922,17 +2426,21 @@ static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) const struct lu_env *env; struct req_capsule *pill; int rc; + ENTRY; env = info->mti_env; - pill = &info->mti_pill; + pill = info->mti_pill; body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY); if (body == NULL) - return -EFAULT; + RETURN(-EFAULT); + + if (!(body->valid & OBD_MD_FLID)) + RETURN(0); if (!fid_is_sane(&body->fid1)) { CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1)); - return -EINVAL; + RETURN(-EINVAL); } /* @@ -1963,16 +2471,14 @@ static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) } else rc = PTR_ERR(obj); - return rc; + RETURN(rc); } static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) { - struct req_capsule *pill; + struct req_capsule *pill = info->mti_pill; int rc; - ENTRY; - pill = &info->mti_pill; if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT)) rc = mdt_body_unpack(info, flags); @@ -1983,6 +2489,7 @@ static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) struct mdt_device *mdt = info->mti_mdt; /* Pack reply. */ + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, mdt->mdt_max_mdsize); @@ -1990,7 +2497,7 @@ static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, mdt->mdt_max_cookiesize); - rc = req_capsule_pack(pill); + rc = req_capsule_server_pack(pill); } RETURN(rc); } @@ -2024,28 +2531,25 @@ static int mdt_req_handle(struct mdt_thread_info *info, LASSERT(current->journal_info == NULL); /* - * Do not use *_FAIL_CHECK_ONCE() macros, because they will stop - * correct handling of failed req later in ldlm due to doing - * obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED without actually - * correct actions like it is done in target_send_reply_msg(). + * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try + * to put same checks into handlers like mdt_close(), mdt_reint(), + * etc., without talking to mdt authors first. Checking same thing + * there again is useless and returning 0 error without packing reply + * is buggy! Handlers either pack reply or return error. + * + * We return 0 here and do not send any reply in order to emulate + * network failure. Do not send any reply in case any of NET related + * fail_id has occured. */ - if (h->mh_fail_id != 0) { - /* - * Set to info->mti_fail_id to handler fail_id, it will be used - * later, and better than use default fail_id. 
- */ - if (OBD_FAIL_CHECK(h->mh_fail_id)) { - info->mti_fail_id = h->mh_fail_id; - RETURN(0); - } - } + if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE)) + RETURN(0); rc = 0; flags = h->mh_flags; LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL)); if (h->mh_fmt != NULL) { - req_capsule_set(&info->mti_pill, h->mh_fmt); + req_capsule_set(info->mti_pill, h->mh_fmt); rc = mdt_unpack_req_pack_rep(info, flags); } @@ -2059,14 +2563,28 @@ static int mdt_req_handle(struct mdt_thread_info *info, LASSERT(h->mh_fmt != NULL); - dlm_req = req_capsule_client_get(&info->mti_pill, &RMF_DLM_REQ); + dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ); if (dlm_req != NULL) { - if (info->mti_mdt->mdt_opts.mo_compat_resname) - rc = mdt_lock_resname_compat(info->mti_mdt, - dlm_req); - info->mti_dlm_req = dlm_req; + if (unlikely(dlm_req->lock_desc.l_resource.lr_type == + LDLM_IBITS && + dlm_req->lock_desc.l_policy_data.\ + l_inodebits.bits == 0)) { + /* + * Lock without inodebits makes no sense and + * will oops later in ldlm. If client miss to + * set such bits, do not trigger ASSERTION. + * + * For liblustre flock case, it maybe zero. + */ + rc = -EPROTO; + } else { + if (info->mti_mdt->mdt_opts.mo_compat_resname) + rc = mdt_lock_resname_compat( + info->mti_mdt, + dlm_req); + info->mti_dlm_req = dlm_req; + } } else { - CERROR("Can't unpack dlm request\n"); rc = -EFAULT; } } @@ -2086,6 +2604,13 @@ static int mdt_req_handle(struct mdt_thread_info *info, * only */ rc = h->mh_act(info); + if (rc == 0 && + !req->rq_no_reply && req->rq_reply_state == NULL) { + DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not " + "pack reply and returned 0 error\n", + h->mh_name); + LBUG(); + } serious = is_serious(rc); rc = clear_serious(rc); } else @@ -2106,22 +2631,23 @@ static int mdt_req_handle(struct mdt_thread_info *info, info->mti_mdt->mdt_opts.mo_compat_resname) { struct ldlm_reply *dlmrep; - dlmrep = req_capsule_server_get(&info->mti_pill, &RMF_DLM_REP); + dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); if (dlmrep != NULL) rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep); } /* If we're DISCONNECTing, the mdt_export_data is already freed */ - if (likely(rc == 0 && h->mh_opc != MDS_DISCONNECT)) + if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT)) target_committed_to_req(req); - if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) && + if (unlikely(req_is_replay(req) && lustre_msg_get_transno(req->rq_reqmsg) == 0)) { DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY"); LBUG(); } - RETURN(rc); + target_send_reply(req, rc, info->mti_fail_id); + RETURN(0); } void mdt_lock_handle_init(struct mdt_lock_handle *lh) @@ -2150,11 +2676,8 @@ static void mdt_thread_info_init(struct ptlrpc_request *req, int i; struct md_capainfo *ci; - info->mti_rep_buf_nr = ARRAY_SIZE(info->mti_rep_buf_size); - for (i = 0; i < ARRAY_SIZE(info->mti_rep_buf_size); i++) - info->mti_rep_buf_size[i] = -1; - req_capsule_init(&info->mti_pill, req, RCL_SERVER, - info->mti_rep_buf_size); + req_capsule_init(&req->rq_pill, req, RCL_SERVER); + info->mti_pill = &req->rq_pill; /* lock handle */ for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++) @@ -2164,14 +2687,24 @@ static void mdt_thread_info_init(struct ptlrpc_request *req, if (req->rq_export) { info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev); info->mti_exp = req->rq_export; - } else + } else info->mti_mdt = NULL; info->mti_env = req->rq_svc_thread->t_env; ci = md_capainfo(info->mti_env); memset(ci, 0, sizeof *ci); + if 
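One behavioural change in mdt_req_handle() above is that an IBITS enqueue carrying an empty inodebits mask is now rejected with -EPROTO instead of being allowed to trip an assertion deeper in the DLM. A minimal standalone model of that guard, with invented constants:

/* IBITS sanity check sketch: a zero bit mask on an inodebits lock request
 * is a protocol error. Constants are illustrative. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum toy_res_type { TOY_PLAIN = 10, TOY_IBITS = 13 };

static int check_enqueue(enum toy_res_type type, uint64_t ibits)
{
        if (type == TOY_IBITS && ibits == 0)
                return -EPROTO;         /* client forgot to set any bit */
        return 0;
}

int main(void)
{
        printf("%d\n", check_enqueue(TOY_IBITS, 0));    /* -EPROTO */
        printf("%d\n", check_enqueue(TOY_IBITS, 0x1));  /* 0 */
        printf("%d\n", check_enqueue(TOY_PLAIN, 0));    /* 0 */
        return 0;
}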
(req->rq_export) { + if (exp_connect_rmtclient(req->rq_export)) + ci->mc_auth = LC_ID_CONVERT; + else if (req->rq_export->exp_connect_flags & + OBD_CONNECT_MDS_CAPA) + ci->mc_auth = LC_ID_PLAIN; + else + ci->mc_auth = LC_ID_NONE; + } info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET; info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg); + info->mti_mos = NULL; memset(&info->mti_attr, 0, sizeof(info->mti_attr)); info->mti_body = NULL; @@ -2184,14 +2717,20 @@ static void mdt_thread_info_init(struct ptlrpc_request *req, /* To not check for split by default. */ info->mti_spec.sp_ck_split = 0; + info->mti_spec.no_create = 0; } static void mdt_thread_info_fini(struct mdt_thread_info *info) { int i; - req_capsule_fini(&info->mti_pill); + req_capsule_fini(info->mti_pill); if (info->mti_object != NULL) { + /* + * freeing an object may lead to OSD level transaction, do not + * let it mess with MDT. bz19385. + */ + info->mti_no_need_trans = 1; mdt_object_put(info->mti_env, info->mti_object); info->mti_object = NULL; } @@ -2200,9 +2739,35 @@ static void mdt_thread_info_fini(struct mdt_thread_info *info) info->mti_env = NULL; } -/* mds/handler.c */ -extern int mds_filter_recovery_request(struct ptlrpc_request *req, - struct obd_device *obd, int *process); +static int mdt_filter_recovery_request(struct ptlrpc_request *req, + struct obd_device *obd, int *process) +{ + switch (lustre_msg_get_opc(req->rq_reqmsg)) { + case MDS_CONNECT: /* This will never get here, but for completeness. */ + case OST_CONNECT: /* This will never get here, but for completeness. */ + case MDS_DISCONNECT: + case OST_DISCONNECT: + *process = 1; + RETURN(0); + + case MDS_CLOSE: + case MDS_DONE_WRITING: + case MDS_SYNC: /* used in unmounting */ + case OBD_PING: + case MDS_REINT: + case SEQ_QUERY: + case FLD_QUERY: + case LDLM_ENQUEUE: + *process = target_queue_recovery_request(req, obd); + RETURN(0); + + default: + DEBUG_REQ(D_ERROR, req, "not permitted during recovery"); + *process = -EAGAIN; + RETURN(0); + } +} + /* * Handle recovery. Return: * +1: continue request processing; @@ -2212,7 +2777,6 @@ extern int mds_filter_recovery_request(struct ptlrpc_request *req, static int mdt_recovery(struct mdt_thread_info *info) { struct ptlrpc_request *req = mdt_info_req(info); - int recovering; struct obd_device *obd; ENTRY; @@ -2235,7 +2799,7 @@ static int mdt_recovery(struct mdt_thread_info *info) } } - if (unlikely(req->rq_export == NULL)) { + if (unlikely(!class_connected_export(req->rq_export))) { CERROR("operation %d on unconnected MDS from %s\n", lustre_msg_get_opc(req->rq_reqmsg), libcfs_id2str(req->rq_peer)); @@ -2273,14 +2837,11 @@ static int mdt_recovery(struct mdt_thread_info *info) obd = req->rq_export->exp_obd; /* Check for aborted recovery... 
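mdt_filter_recovery_request() above replaces the old mds_filter_recovery_request() and simply classifies incoming opcodes while recovery is in progress: connect and disconnect pass through, replayable operations are queued by the recovery machinery, and anything else is bounced with -EAGAIN. A compilable toy classifier with invented opcode names:

/* Sketch of the recovery-time request classification. */
#include <stdio.h>

enum toy_opc { T_CONNECT, T_DISCONNECT, T_CLOSE, T_REINT, T_PING, T_GETATTR };
enum recovery_action { PROCESS_NOW, QUEUE_FOR_RECOVERY, REJECT_EAGAIN };

static enum recovery_action filter_during_recovery(enum toy_opc opc)
{
        switch (opc) {
        case T_CONNECT:
        case T_DISCONNECT:
                return PROCESS_NOW;
        case T_CLOSE:
        case T_REINT:
        case T_PING:
                return QUEUE_FOR_RECOVERY;
        default:
                return REJECT_EAGAIN;   /* e.g. plain GETATTR: retry later */
        }
}

int main(void)
{
        printf("%d %d %d\n",
               filter_during_recovery(T_CONNECT),
               filter_during_recovery(T_REINT),
               filter_during_recovery(T_GETATTR));
        return 0;
}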
*/ - spin_lock_bh(&obd->obd_processing_task_lock); - recovering = obd->obd_recovering; - spin_unlock_bh(&obd->obd_processing_task_lock); - if (unlikely(recovering)) { + if (unlikely(obd->obd_recovering)) { int rc; int should_process; DEBUG_REQ(D_INFO, req, "Got new replay"); - rc = mds_filter_recovery_request(req, obd, &should_process); + rc = mdt_filter_recovery_request(req, obd, &should_process); if (rc != 0 || !should_process) RETURN(rc); else if (should_process < 0) { @@ -2292,24 +2853,86 @@ static int mdt_recovery(struct mdt_thread_info *info) RETURN(+1); } -static int mdt_reply(struct ptlrpc_request *req, int rc, - struct mdt_thread_info *info) +static int mdt_msg_check_version(struct lustre_msg *msg) { - ENTRY; + int rc; -#if 0 - if (req->rq_reply_state == NULL && rc == 0) { - req->rq_status = rc; - lustre_pack_reply(req, 1, NULL, NULL); + switch (lustre_msg_get_opc(msg)) { + case MDS_CONNECT: + case MDS_DISCONNECT: + case OBD_PING: + case SEC_CTX_INIT: + case SEC_CTX_INIT_CONT: + case SEC_CTX_FINI: + rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_OBD_VERSION); + break; + case MDS_GETSTATUS: + case MDS_GETATTR: + case MDS_GETATTR_NAME: + case MDS_STATFS: + case MDS_READPAGE: + case MDS_WRITEPAGE: + case MDS_IS_SUBDIR: + case MDS_REINT: + case MDS_CLOSE: + case MDS_DONE_WRITING: + case MDS_PIN: + case MDS_SYNC: + case MDS_GETXATTR: + case MDS_SETXATTR: + case MDS_SET_INFO: + case MDS_GET_INFO: + case MDS_QUOTACHECK: + case MDS_QUOTACTL: + case QUOTA_DQACQ: + case QUOTA_DQREL: + case SEQ_QUERY: + case FLD_QUERY: + rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_MDS_VERSION); + break; + case LDLM_ENQUEUE: + case LDLM_CONVERT: + case LDLM_BL_CALLBACK: + case LDLM_CP_CALLBACK: + rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_DLM_VERSION); + break; + case OBD_LOG_CANCEL: + case LLOG_ORIGIN_HANDLE_CREATE: + case LLOG_ORIGIN_HANDLE_NEXT_BLOCK: + case LLOG_ORIGIN_HANDLE_READ_HEADER: + case LLOG_ORIGIN_HANDLE_CLOSE: + case LLOG_ORIGIN_HANDLE_DESTROY: + case LLOG_ORIGIN_HANDLE_PREV_BLOCK: + case LLOG_CATINFO: + rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION); + if (rc) + CERROR("bad opc %u version %08x, expecting %08x\n", + lustre_msg_get_opc(msg), + lustre_msg_get_version(msg), + LUSTRE_LOG_VERSION); + break; + default: + CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg)); + rc = -ENOTSUPP; } -#endif - target_send_reply(req, rc, info->mti_fail_id); - RETURN(0); + return rc; } -/* mds/handler.c */ -extern int mds_msg_check_version(struct lustre_msg *msg); - static int mdt_handle0(struct ptlrpc_request *req, struct mdt_thread_info *info, struct mdt_opc_slice *supported) @@ -2320,12 +2943,13 @@ static int mdt_handle0(struct ptlrpc_request *req, ENTRY; - MDT_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0); + if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE)) + RETURN(0); LASSERT(current->journal_info == NULL); msg = req->rq_reqmsg; - rc = mds_msg_check_version(msg); + rc = mdt_msg_check_version(msg); if (likely(rc == 0)) { rc = mdt_recovery(info); if (likely(rc == +1)) { @@ -2333,9 +2957,9 @@ static int mdt_handle0(struct ptlrpc_request 
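mdt_msg_check_version() above is a large switch that ties each opcode family to the protocol version constant it must carry (OBD, MDS, DLM or LOG). The sketch below compresses the same idea into a small range table purely for brevity; the numbers are placeholders, and it returns -EOPNOTSUPP where the kernel code uses the kernel-only -ENOTSUPP so that it builds in userspace.

/* Condensed, table-driven model of the per-opcode version check. */
#include <errno.h>
#include <stdio.h>

enum { V_OBD = 1, V_MDS = 2, V_DLM = 3, V_LOG = 4 };

struct ver_rule { int opc_lo, opc_hi, version; };

static const struct ver_rule rules[] = {
        {   8,   9, V_OBD },    /* connect/disconnect, ping, sec ctx */
        {  33,  60, V_MDS },    /* getattr, reint, close, quota, seq, fld */
        { 101, 104, V_DLM },    /* enqueue, convert, callbacks */
        { 501, 508, V_LOG },    /* llog origin handles */
};

static int check_version(int opc, int msg_version)
{
        unsigned i;

        for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
                if (opc >= rules[i].opc_lo && opc <= rules[i].opc_hi)
                        return msg_version == rules[i].version ? 0 : -EPROTO;
        return -EOPNOTSUPP;     /* unknown opcode */
}

int main(void)
{
        printf("%d %d\n", check_version(40, V_MDS), check_version(40, V_OBD));
        return 0;
}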
*req, supported); if (likely(h != NULL)) { rc = mdt_req_handle(info, h, req); - rc = mdt_reply(req, rc, info); } else { - CERROR("The unsupported opc: 0x%x\n", lustre_msg_get_opc(msg) ); + CERROR("The unsupported opc: 0x%x\n", + lustre_msg_get_opc(msg) ); req->rq_status = -ENOTSUPP; rc = ptlrpc_error(req); RETURN(rc); @@ -2531,7 +3155,7 @@ int mdt_intent_lock_replace(struct mdt_thread_info *info, * lock. */ if (new_lock == NULL) - new_lock = ldlm_handle2lock(&lh->mlh_reg_lh); + new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0); if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) { lh->mlh_reg_lh.cookie = 0; @@ -2567,29 +3191,36 @@ int mdt_intent_lock_replace(struct mdt_thread_info *info, RETURN(ELDLM_LOCK_REPLACED); } - /* This lock might already be given to the client by an resent req, - * in this case we should return ELDLM_LOCK_ABORTED, - * so we should check led_held_locks here, but it will affect - * performance, FIXME + /* + * Fixup the lock to be given to the client. */ - /* Fixup the lock to be given to the client */ lock_res_and_lock(new_lock); - new_lock->l_readers = 0; - new_lock->l_writers = 0; - - new_lock->l_export = class_export_get(req->rq_export); - spin_lock(&req->rq_export->exp_ldlm_data.led_lock); - list_add(&new_lock->l_export_chain, - &new_lock->l_export->exp_ldlm_data.led_held_locks); - spin_unlock(&req->rq_export->exp_ldlm_data.led_lock); + /* Zero new_lock->l_readers and new_lock->l_writers without triggering + * possible blocking AST. */ + while (new_lock->l_readers > 0) { + lu_ref_del(&new_lock->l_reference, "reader", new_lock); + lu_ref_del(&new_lock->l_reference, "user", new_lock); + new_lock->l_readers--; + } + while (new_lock->l_writers > 0) { + lu_ref_del(&new_lock->l_reference, "writer", new_lock); + lu_ref_del(&new_lock->l_reference, "user", new_lock); + new_lock->l_writers--; + } + new_lock->l_export = class_export_lock_get(req->rq_export, new_lock); new_lock->l_blocking_ast = lock->l_blocking_ast; new_lock->l_completion_ast = lock->l_completion_ast; new_lock->l_remote_handle = lock->l_remote_handle; new_lock->l_flags &= ~LDLM_FL_LOCAL; unlock_res_and_lock(new_lock); - LDLM_LOCK_PUT(new_lock); + + cfs_hash_add(new_lock->l_export->exp_lock_hash, + &new_lock->l_remote_handle, + &new_lock->l_exp_hash); + + LDLM_LOCK_RELEASE(new_lock); lh->mlh_reg_lh.cookie = 0; RETURN(ELDLM_LOCK_REPLACED); @@ -2604,34 +3235,32 @@ static void mdt_intent_fixup_resent(struct mdt_thread_info *info, struct obd_export *exp = req->rq_export; struct lustre_handle remote_hdl; struct ldlm_request *dlmreq; - struct list_head *iter; + struct ldlm_lock *lock; if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)) return; - dlmreq = req_capsule_client_get(&info->mti_pill, &RMF_DLM_REQ); + dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ); remote_hdl = dlmreq->lock_handle[0]; - spin_lock(&exp->exp_ldlm_data.led_lock); - list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) { - struct ldlm_lock *lock; - lock = list_entry(iter, struct ldlm_lock, l_export_chain); - if (lock == new_lock) - continue; - if (lock->l_remote_handle.cookie == remote_hdl.cookie) { + lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl); + if (lock) { + if (lock != new_lock) { lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie; lh->mlh_reg_mode = lock->l_granted_mode; - LDLM_DEBUG(lock, "restoring lock cookie"); - DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64, + LDLM_DEBUG(lock, "Restoring lock cookie"); + DEBUG_REQ(D_DLMTRACE, req, + "restoring lock cookie "LPX64, lh->mlh_reg_lh.cookie); if 
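mdt_intent_fixup_resent() now finds a previously granted lock through the export lock hash (cfs_hash_lookup on the client's remote handle) instead of walking led_held_locks under a spinlock. The standalone toy below keeps only the essential idea: look the remote cookie up and, on a hit, reuse the existing grant's local cookie and mode. The table and struct names are invented.

/* Toy resent-request fixup: a linear scan stands in for the export hash. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_lock { uint64_t remote_cookie; uint64_t local_cookie; int granted_mode; };
struct toy_lh   { uint64_t reg_cookie; int reg_mode; };

static const struct toy_lock *lookup_by_remote(const struct toy_lock *tbl,
                                               size_t n, uint64_t remote)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (tbl[i].remote_cookie == remote)
                        return &tbl[i];
        return NULL;
}

static int fixup_resent(struct toy_lh *lh, const struct toy_lock *tbl,
                        size_t n, uint64_t remote)
{
        const struct toy_lock *lock = lookup_by_remote(tbl, n, remote);

        if (lock == NULL)
                return 0;                       /* not resent: grant normally */
        lh->reg_cookie = lock->local_cookie;    /* restore the existing grant */
        lh->reg_mode   = lock->granted_mode;
        return 1;
}

int main(void)
{
        const struct toy_lock tbl[] = { { 0xabcd, 0x1111, 4 } };
        struct toy_lh lh = { 0, 0 };

        printf("%d %#llx\n", fixup_resent(&lh, tbl, 1, 0xabcd),
               (unsigned long long)lh.reg_cookie);
        return 0;
}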
(old_lock) *old_lock = LDLM_LOCK_GET(lock); - spin_unlock(&exp->exp_ldlm_data.led_lock); + cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash); return; } + + cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash); } - spin_unlock(&exp->exp_ldlm_data.led_lock); /* * If the xid matches, then we know this is a resent request, and allow @@ -2647,7 +3276,7 @@ static void mdt_intent_fixup_resent(struct mdt_thread_info *info, */ lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); - DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64, + DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64, remote_hdl.cookie); } @@ -2666,10 +3295,10 @@ static int mdt_intent_getattr(enum mdt_it_code opcode, int rc; ENTRY; - reqbody = req_capsule_client_get(&info->mti_pill, &RMF_MDT_BODY); + reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY); LASSERT(reqbody); - repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY); + repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); LASSERT(repbody); info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT); @@ -2686,15 +3315,15 @@ static int mdt_intent_getattr(enum mdt_it_code opcode, break; default: CERROR("Unhandled till now"); - GOTO(out, rc = -EINVAL); + GOTO(out_shrink, rc = -EINVAL); } rc = mdt_init_ucred(info, reqbody); if (rc) - GOTO(out, rc); + GOTO(out_shrink, rc); - req = info->mti_pill.rc_req; - ldlm_rep = req_capsule_server_get(&info->mti_pill, &RMF_DLM_REP); + req = info->mti_pill->rc_req; + ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD); /* Get lock from request for possible resent case. */ @@ -2715,7 +3344,7 @@ static int mdt_intent_getattr(enum mdt_it_code opcode, EXIT; out_ucred: mdt_exit_ucred(info); -out: +out_shrink: mdt_shrink_reply(info); return rc; } @@ -2739,12 +3368,12 @@ static int mdt_intent_reint(enum mdt_it_code opcode, opc = mdt_reint_opcode(info, intent_fmts); if (opc < 0) - GOTO(out, rc = opc); + RETURN(opc); if (mdt_it_flavor[opcode].it_reint != opc) { CERROR("Reint code %ld doesn't match intent: %d\n", opc, opcode); - GOTO(out, rc = err_serious(-EPROTO)); + RETURN(err_serious(-EPROTO)); } /* Get lock from request for possible resent case. */ @@ -2754,9 +3383,9 @@ static int mdt_intent_reint(enum mdt_it_code opcode, /* Check whether the reply has been packed successfully. */ if (mdt_info_req(info)->rq_repmsg != NULL) - rep = req_capsule_server_get(&info->mti_pill, &RMF_DLM_REP); + rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); if (rep == NULL) - GOTO(out, rc = err_serious(-EFAULT)); + RETURN(err_serious(-EFAULT)); /* MDC expects this in any case */ if (rc != 0) @@ -2767,15 +3396,40 @@ static int mdt_intent_reint(enum mdt_it_code opcode, LASSERT(lustre_handle_is_used(&lhc->mlh_reg_lh)); rep->lock_policy_res2 = 0; rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags); - GOTO(out, rc); + RETURN(rc); } rep->lock_policy_res2 = clear_serious(rc); - lhc->mlh_reg_lh.cookie = 0ull; - rc = ELDLM_LOCK_ABORTED; - EXIT; -out: - return rc; + if (rc == -ENOTCONN || rc == -ENODEV || + rc == -EOVERFLOW) { /**< if VBR failure then return error */ + /* + * If it is the disconnect error (ENODEV & ENOCONN), the error + * will be returned by rq_status, and client at ptlrpc layer + * will detect this, then disconnect, reconnect the import + * immediately, instead of impacting the following the rpc. 
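The rewritten tail of mdt_intent_reint() routes errors down two different paths: disconnect and VBR-style failures (-ENOTCONN, -ENODEV, -EOVERFLOW) go back through the request status so the client reconnects at once, while everything else is delivered inside the intent reply. A tiny model of that routing decision:

/* Sketch of the error-routing choice; the enum is invented. */
#include <errno.h>
#include <stdio.h>

enum err_path { VIA_RQ_STATUS, VIA_INTENT_REPLY };

static enum err_path route_reint_error(int rc)
{
        if (rc == -ENOTCONN || rc == -ENODEV || rc == -EOVERFLOW)
                return VIA_RQ_STATUS;   /* force client reconnect / VBR retry */
        return VIA_INTENT_REPLY;        /* client reads it from the intent */
}

int main(void)
{
        printf("%d %d\n", route_reint_error(-ENODEV),
               route_reint_error(-ENOENT));
        return 0;
}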
+ */ + lhc->mlh_reg_lh.cookie = 0ull; + RETURN(rc); + } else { + /* + * For other cases, the error will be returned by intent. + * and client will retrieve the result from intent. + */ + /* + * FIXME: when open lock is finished, that should be + * checked here. + */ + if (lustre_handle_is_used(&lhc->mlh_reg_lh)) { + LASSERTF(rc == 0, "Error occurred but lock handle " + "is still in use\n"); + rep->lock_policy_res2 = 0; + rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags); + RETURN(rc); + } else { + lhc->mlh_reg_lh.cookie = 0ull; + RETURN(ELDLM_LOCK_ABORTED); + } + } } static int mdt_intent_code(long itcode) @@ -2831,7 +3485,7 @@ static int mdt_intent_opc(long itopc, struct mdt_thread_info *info, if (opc < 0) RETURN(-EINVAL); - pill = &info->mti_pill; + pill = info->mti_pill; flv = &mdt_it_flavor[opc]; if (flv->it_fmt != NULL) @@ -2842,13 +3496,14 @@ static int mdt_intent_opc(long itopc, struct mdt_thread_info *info, struct ptlrpc_request *req = mdt_info_req(info); if (flv->it_flags & MUTABOR && req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY) - rc = -EROFS; + RETURN(-EROFS); } if (rc == 0 && flv->it_act != NULL) { /* execute policy */ rc = flv->it_act(opc, info, lockp, flags); - } else + } else { rc = -EOPNOTSUPP; + } RETURN(rc); } @@ -2869,89 +3524,62 @@ static int mdt_intent_policy(struct ldlm_namespace *ns, info = lu_context_key_get(&req->rq_svc_thread->t_env->le_ctx, &mdt_thread_key); LASSERT(info != NULL); - pill = &info->mti_pill; + pill = info->mti_pill; LASSERT(pill->rc_req == req); if (req->rq_reqmsg->lm_bufcount > DLM_INTENT_IT_OFF) { req_capsule_extend(pill, &RQF_LDLM_INTENT); it = req_capsule_client_get(pill, &RMF_LDLM_INTENT); if (it != NULL) { - const struct ldlm_request *dlmreq; - __u64 req_bits; -#if 0 - struct ldlm_lock *lock = *lockp; - - LDLM_DEBUG(lock, "intent policy opc: %s\n", - ldlm_it2str(it->opc)); -#endif - rc = mdt_intent_opc(it->opc, info, lockp, flags); if (rc == 0) rc = ELDLM_OK; - /* - * Lock without inodebits makes no sense and will oops + /* Lock without inodebits makes no sense and will oops * later in ldlm. Let's check it now to see if we have - * wrong lock from client or bits get corrupted - * somewhere in mdt_intent_opc(). - */ - dlmreq = info->mti_dlm_req; - req_bits = dlmreq->lock_desc.l_policy_data.l_inodebits.bits; - LASSERT(req_bits != 0); - + * ibits corrupted somewhere in mdt_intent_opc(). + * The case for client miss to set ibits has been + * processed by others. 
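mdt_intent_opc() above now returns -EROFS directly when a mutating intent (MUTABOR) arrives on an export that connected read-only, instead of falling through to the policy function. Sketched standalone with invented flag bits:

/* Minimal model of the read-only guard on mutating intents. */
#include <errno.h>
#include <stdio.h>

#define IT_MUTABOR 0x1          /* intent mutates metadata */
#define EXP_RDONLY 0x2          /* export connected read-only */

static int check_intent(unsigned it_flags, unsigned exp_flags)
{
        if ((it_flags & IT_MUTABOR) && (exp_flags & EXP_RDONLY))
                return -EROFS;
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_intent(IT_MUTABOR, EXP_RDONLY),
               check_intent(0, EXP_RDONLY));
        return 0;
}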
*/ + LASSERT(ergo(info->mti_dlm_req->lock_desc.l_resource.\ + lr_type == LDLM_IBITS, + info->mti_dlm_req->lock_desc.\ + l_policy_data.l_inodebits.bits != 0)); } else rc = err_serious(-EFAULT); } else { /* No intent was provided */ LASSERT(pill->rc_fmt == &RQF_LDLM_ENQUEUE); - rc = req_capsule_pack(pill); + rc = req_capsule_server_pack(pill); if (rc) rc = err_serious(rc); } RETURN(rc); } -/* - * Seq wrappers - */ -static void mdt_seq_adjust(const struct lu_env *env, - struct mdt_device *m, int lost) -{ - struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site; - struct lu_range out; - ENTRY; - - LASSERT(ls && ls->ls_server_seq); - LASSERT(lost >= 0); - /* get extra seq from seq_server, moving it's range up */ - while (lost-- > 0) { - seq_server_alloc_meta(ls->ls_server_seq, NULL, &out, env); - } - EXIT; -} - static int mdt_seq_fini(const struct lu_env *env, struct mdt_device *m) { - struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site; + struct md_site *ms = mdt_md_site(m); ENTRY; - if (ls && ls->ls_server_seq) { - seq_server_fini(ls->ls_server_seq, env); - OBD_FREE_PTR(ls->ls_server_seq); - ls->ls_server_seq = NULL; + if (ms != NULL) { + if (ms->ms_server_seq) { + seq_server_fini(ms->ms_server_seq, env); + OBD_FREE_PTR(ms->ms_server_seq); + ms->ms_server_seq = NULL; } - if (ls && ls->ls_control_seq) { - seq_server_fini(ls->ls_control_seq, env); - OBD_FREE_PTR(ls->ls_control_seq); - ls->ls_control_seq = NULL; + if (ms->ms_control_seq) { + seq_server_fini(ms->ms_control_seq, env); + OBD_FREE_PTR(ms->ms_control_seq); + ms->ms_control_seq = NULL; } - if (ls && ls->ls_client_seq) { - seq_client_fini(ls->ls_client_seq); - OBD_FREE_PTR(ls->ls_client_seq); - ls->ls_client_seq = NULL; + if (ms->ms_client_seq) { + seq_client_fini(ms->ms_client_seq); + OBD_FREE_PTR(ms->ms_client_seq); + ms->ms_client_seq = NULL; + } } RETURN(0); @@ -2961,39 +3589,40 @@ static int mdt_seq_init(const struct lu_env *env, const char *uuid, struct mdt_device *m) { - struct lu_site *ls; + struct md_site *ms; char *prefix; int rc; ENTRY; - ls = m->mdt_md_dev.md_lu_dev.ld_site; + ms = mdt_md_site(m); /* * This is sequence-controller node. Init seq-controller server on local * MDT. */ - if (ls->ls_node_id == 0) { - LASSERT(ls->ls_control_seq == NULL); + if (ms->ms_node_id == 0) { + LASSERT(ms->ms_control_seq == NULL); - OBD_ALLOC_PTR(ls->ls_control_seq); - if (ls->ls_control_seq == NULL) + OBD_ALLOC_PTR(ms->ms_control_seq); + if (ms->ms_control_seq == NULL) RETURN(-ENOMEM); - rc = seq_server_init(ls->ls_control_seq, + rc = seq_server_init(ms->ms_control_seq, m->mdt_bottom, uuid, LUSTRE_SEQ_CONTROLLER, + ms, env); if (rc) GOTO(out_seq_fini, rc); - OBD_ALLOC_PTR(ls->ls_client_seq); - if (ls->ls_client_seq == NULL) + OBD_ALLOC_PTR(ms->ms_client_seq); + if (ms->ms_client_seq == NULL) GOTO(out_seq_fini, rc = -ENOMEM); OBD_ALLOC(prefix, MAX_OBD_NAME + 5); if (prefix == NULL) { - OBD_FREE_PTR(ls->ls_client_seq); + OBD_FREE_PTR(ms->ms_client_seq); GOTO(out_seq_fini, rc = -ENOMEM); } @@ -3002,11 +3631,11 @@ static int mdt_seq_init(const struct lu_env *env, /* * Init seq-controller client after seq-controller server is - * ready. Pass ls->ls_control_seq to it for direct talking. + * ready. Pass ms->ms_control_seq to it for direct talking. 
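mdt_seq_init(), being converted here from lu_site to md_site fields, keeps its single out_seq_fini error label: every failed allocation or initialization jumps there so whatever was built so far is torn down exactly once. A reduced userspace skeleton of that pattern, with malloc/free standing in for OBD_ALLOC_PTR and the seq server/client setup:

/* Skeleton of the single-cleanup-label error handling style. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_seq { int dummy; };

static int toy_seq_setup(struct toy_seq **ctl, struct toy_seq **srv)
{
        *ctl = NULL;
        *srv = NULL;

        *ctl = malloc(sizeof(**ctl));
        if (*ctl == NULL)
                goto out_fini;

        *srv = malloc(sizeof(**srv));
        if (*srv == NULL)
                goto out_fini;

        return 0;

out_fini:                               /* undo partial setup once */
        free(*ctl);
        free(*srv);
        *ctl = NULL;
        *srv = NULL;
        return -ENOMEM;
}

int main(void)
{
        struct toy_seq *ctl, *srv;

        printf("%d\n", toy_seq_setup(&ctl, &srv));
        free(ctl);
        free(srv);
        return 0;
}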
*/ - rc = seq_client_init(ls->ls_client_seq, NULL, + rc = seq_client_init(ms->ms_client_seq, NULL, LUSTRE_SEQ_METADATA, prefix, - ls->ls_control_seq); + ms->ms_control_seq); OBD_FREE(prefix, MAX_OBD_NAME + 5); if (rc) @@ -3014,25 +3643,26 @@ static int mdt_seq_init(const struct lu_env *env, } /* Init seq-server on local MDT */ - LASSERT(ls->ls_server_seq == NULL); + LASSERT(ms->ms_server_seq == NULL); - OBD_ALLOC_PTR(ls->ls_server_seq); - if (ls->ls_server_seq == NULL) + OBD_ALLOC_PTR(ms->ms_server_seq); + if (ms->ms_server_seq == NULL) GOTO(out_seq_fini, rc = -ENOMEM); - rc = seq_server_init(ls->ls_server_seq, + rc = seq_server_init(ms->ms_server_seq, m->mdt_bottom, uuid, LUSTRE_SEQ_SERVER, + ms, env); if (rc) GOTO(out_seq_fini, rc = -ENOMEM); /* Assign seq-controller client to local seq-server. */ - if (ls->ls_node_id == 0) { - LASSERT(ls->ls_client_seq != NULL); + if (ms->ms_node_id == 0) { + LASSERT(ms->ms_client_seq != NULL); - rc = seq_server_set_cli(ls->ls_server_seq, - ls->ls_client_seq, + rc = seq_server_set_cli(ms->ms_server_seq, + ms->ms_client_seq, env); } @@ -3051,7 +3681,7 @@ static int mdt_seq_init_cli(const struct lu_env *env, struct mdt_device *m, struct lustre_cfg *cfg) { - struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site; + struct md_site *ms = mdt_md_site(m); struct obd_device *mdc; struct obd_uuid *uuidp, *mdcuuidp; char *uuid_str, *mdc_uuid_str; @@ -3075,7 +3705,7 @@ static int mdt_seq_init_cli(const struct lu_env *env, /* check if this is adding the first MDC and controller is not yet * initialized. */ - if (index != 0 || ls->ls_client_seq) + if (index != 0 || ms->ms_client_seq) RETURN(0); uuid_str = lustre_cfg_string(cfg, 1); @@ -3092,9 +3722,9 @@ static int mdt_seq_init_cli(const struct lu_env *env, CERROR("target %s not set up\n", mdc->obd_name); rc = -EINVAL; } else { - LASSERT(ls->ls_control_exp); - OBD_ALLOC_PTR(ls->ls_client_seq); - if (ls->ls_client_seq != NULL) { + LASSERT(ms->ms_control_exp); + OBD_ALLOC_PTR(ms->ms_client_seq); + if (ms->ms_client_seq != NULL) { char *prefix; OBD_ALLOC(prefix, MAX_OBD_NAME + 5); @@ -3104,8 +3734,8 @@ static int mdt_seq_init_cli(const struct lu_env *env, snprintf(prefix, MAX_OBD_NAME + 5, "ctl-%s", mdc->obd_name); - rc = seq_client_init(ls->ls_client_seq, - ls->ls_control_exp, + rc = seq_client_init(ms->ms_client_seq, + ms->ms_control_exp, LUSTRE_SEQ_METADATA, prefix, NULL); OBD_FREE(prefix, MAX_OBD_NAME + 5); @@ -3115,8 +3745,8 @@ static int mdt_seq_init_cli(const struct lu_env *env, if (rc) RETURN(rc); - LASSERT(ls->ls_server_seq != NULL); - rc = seq_server_set_cli(ls->ls_server_seq, ls->ls_client_seq, + LASSERT(ms->ms_server_seq != NULL); + rc = seq_server_set_cli(ms->ms_server_seq, ms->ms_client_seq, env); } @@ -3125,19 +3755,21 @@ static int mdt_seq_init_cli(const struct lu_env *env, static void mdt_seq_fini_cli(struct mdt_device *m) { - struct lu_site *ls; + struct md_site *ms; ENTRY; - ls = m->mdt_md_dev.md_lu_dev.ld_site; + ms = mdt_md_site(m); - if (ls && ls->ls_server_seq) - seq_server_set_cli(ls->ls_server_seq, + if (ms != NULL) { + if (ms->ms_server_seq) + seq_server_set_cli(ms->ms_server_seq, NULL, NULL); - if (ls && ls->ls_control_exp) { - class_export_put(ls->ls_control_exp); - ls->ls_control_exp = NULL; + if (ms->ms_control_exp) { + class_export_put(ms->ms_control_exp); + ms->ms_control_exp = NULL; + } } EXIT; } @@ -3148,13 +3780,13 @@ static void mdt_seq_fini_cli(struct mdt_device *m) static int mdt_fld_fini(const struct lu_env *env, struct mdt_device *m) { - struct lu_site *ls = 
m->mdt_md_dev.md_lu_dev.ld_site; + struct md_site *ms = mdt_md_site(m); ENTRY; - if (ls && ls->ls_server_fld) { - fld_server_fini(ls->ls_server_fld, env); - OBD_FREE_PTR(ls->ls_server_fld); - ls->ls_server_fld = NULL; + if (ms && ms->ms_server_fld) { + fld_server_fini(ms->ms_server_fld, env); + OBD_FREE_PTR(ms->ms_server_fld); + ms->ms_server_fld = NULL; } RETURN(0); @@ -3164,21 +3796,22 @@ static int mdt_fld_init(const struct lu_env *env, const char *uuid, struct mdt_device *m) { - struct lu_site *ls; + struct md_site *ms; int rc; ENTRY; - ls = m->mdt_md_dev.md_lu_dev.ld_site; + ms = mdt_md_site(m); - OBD_ALLOC_PTR(ls->ls_server_fld); - if (ls->ls_server_fld == NULL) + OBD_ALLOC_PTR(ms->ms_server_fld); + if (ms->ms_server_fld == NULL) RETURN(rc = -ENOMEM); - rc = fld_server_init(ls->ls_server_fld, - m->mdt_bottom, uuid, env); + rc = fld_server_init(ms->ms_server_fld, + m->mdt_bottom, uuid, + env, ms->ms_node_id); if (rc) { - OBD_FREE_PTR(ls->ls_server_fld); - ls->ls_server_fld = NULL; + OBD_FREE_PTR(ms->ms_server_fld); + ms->ms_server_fld = NULL; RETURN(rc); } @@ -3221,7 +3854,7 @@ static void mdt_stop_ptlrpc_service(struct mdt_device *m) ptlrpc_unregister_service(m->mdt_fld_service); m->mdt_fld_service = NULL; } - ENTRY; + EXIT; } static int mdt_start_ptlrpc_service(struct mdt_device *m) @@ -3234,21 +3867,20 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) procfs_entry = m->mdt_md_dev.md_lu_dev.ld_obd->obd_proc_entry; conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = MDS_MAXREQSIZE, - .psc_max_reply_size = MDS_MAXREPSIZE, - .psc_req_portal = MDS_REQUEST_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = MDS_MAXREQSIZE, + .psc_max_reply_size = MDS_MAXREPSIZE, + .psc_req_portal = MDS_REQUEST_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, /* * We'd like to have a mechanism to set this on a per-device * basis, but alas... */ - .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS), - MDT_MAX_THREADS), - .psc_max_threads = MDT_MAX_THREADS, - .psc_ctx_tags = LCT_MD_THREAD + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD }; m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client; @@ -3257,7 +3889,8 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) m->mdt_regular_service = ptlrpc_init_svc_conf(&conf, mdt_regular_handle, LUSTRE_MDT_NAME, - procfs_entry, NULL, LUSTRE_MDT_NAME); + procfs_entry, target_print_req, + LUSTRE_MDT_NAME); if (m->mdt_regular_service == NULL) RETURN(-ENOMEM); @@ -3270,22 +3903,21 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) * ideally. 
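The service definitions that follow all drop the old single-knob thread count, which clamped mdt_num_threads into [MDT_MIN_THREADS, MDT_MAX_THREADS], in favour of separate mdt_min_threads/mdt_max_threads module parameters, and switch the watchdog from a fixed timeout to a factor applied to the adaptive-timeout estimate. For reference, a standalone version of the old clamp with invented bounds:

/* The removed min(max(n, MIN), MAX) thread-count clamp, stand-alone. */
#include <stdio.h>

#define TOY_MIN_THREADS 12
#define TOY_MAX_THREADS 512

static unsigned long clamp_threads(unsigned long requested)
{
        unsigned long n = requested > TOY_MIN_THREADS ? requested
                                                      : TOY_MIN_THREADS;
        return n < TOY_MAX_THREADS ? n : TOY_MAX_THREADS;
}

int main(void)
{
        printf("%lu %lu %lu\n", clamp_threads(0), clamp_threads(64),
               clamp_threads(100000));
        return 0;
}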
*/ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = MDS_MAXREQSIZE, - .psc_max_reply_size = MDS_MAXREPSIZE, - .psc_req_portal = MDS_READPAGE_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS), - MDT_MAX_THREADS), - .psc_max_threads = MDT_MAX_THREADS, - .psc_ctx_tags = LCT_MD_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = MDS_MAXREQSIZE, + .psc_max_reply_size = MDS_MAXREPSIZE, + .psc_req_portal = MDS_READPAGE_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD }; m->mdt_readpage_service = ptlrpc_init_svc_conf(&conf, mdt_readpage_handle, LUSTRE_MDT_NAME "_readpage", - procfs_entry, NULL, "mdt_rdpg"); + procfs_entry, target_print_req,"mdt_rdpg"); if (m->mdt_readpage_service == NULL) { CERROR("failed to start readpage service\n"); @@ -3298,23 +3930,22 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) * setattr service configuration. */ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = MDS_MAXREQSIZE, - .psc_max_reply_size = MDS_MAXREPSIZE, - .psc_req_portal = MDS_SETATTR_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS), - MDT_MAX_THREADS), - .psc_max_threads = MDT_MAX_THREADS, - .psc_ctx_tags = LCT_MD_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = MDS_MAXREQSIZE, + .psc_max_reply_size = MDS_MAXREPSIZE, + .psc_req_portal = MDS_SETATTR_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD }; m->mdt_setattr_service = ptlrpc_init_svc_conf(&conf, mdt_regular_handle, LUSTRE_MDT_NAME "_setattr", - procfs_entry, NULL, "mdt_attr"); + procfs_entry, target_print_req,"mdt_attr"); if (!m->mdt_setattr_service) { CERROR("failed to start setattr service\n"); @@ -3329,22 +3960,22 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) * sequence controller service configuration */ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = SEQ_MAXREQSIZE, - .psc_max_reply_size = SEQ_MAXREPSIZE, - .psc_req_portal = SEQ_CONTROLLER_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = SEQ_NUM_THREADS, - .psc_max_threads = SEQ_NUM_THREADS, - .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = SEQ_MAXREQSIZE, + .psc_max_reply_size = SEQ_MAXREPSIZE, + .psc_req_portal = SEQ_CONTROLLER_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD }; m->mdt_mdsc_service = ptlrpc_init_svc_conf(&conf, mdt_mdsc_handle, LUSTRE_MDT_NAME"_mdsc", - procfs_entry, NULL, "mdt_mdsc"); + procfs_entry, target_print_req,"mdt_mdsc"); if (!m->mdt_mdsc_service) { CERROR("failed to start seq controller service\n"); GOTO(err_mdt_svc, rc = -ENOMEM); @@ -3358,22 +3989,22 @@ static 
int mdt_start_ptlrpc_service(struct mdt_device *m) * metadata sequence server service configuration */ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = SEQ_MAXREQSIZE, - .psc_max_reply_size = SEQ_MAXREPSIZE, - .psc_req_portal = SEQ_METADATA_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = SEQ_NUM_THREADS, - .psc_max_threads = SEQ_NUM_THREADS, - .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = SEQ_MAXREQSIZE, + .psc_max_reply_size = SEQ_MAXREPSIZE, + .psc_req_portal = SEQ_METADATA_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD }; m->mdt_mdss_service = ptlrpc_init_svc_conf(&conf, mdt_mdss_handle, LUSTRE_MDT_NAME"_mdss", - procfs_entry, NULL, "mdt_mdss"); + procfs_entry, target_print_req,"mdt_mdss"); if (!m->mdt_mdss_service) { CERROR("failed to start metadata seq server service\n"); GOTO(err_mdt_svc, rc = -ENOMEM); @@ -3390,22 +4021,22 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) * controller which manages space. */ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = SEQ_MAXREQSIZE, - .psc_max_reply_size = SEQ_MAXREPSIZE, - .psc_req_portal = SEQ_DATA_PORTAL, - .psc_rep_portal = OSC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = SEQ_NUM_THREADS, - .psc_max_threads = SEQ_NUM_THREADS, - .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = SEQ_MAXREQSIZE, + .psc_max_reply_size = SEQ_MAXREPSIZE, + .psc_req_portal = SEQ_DATA_PORTAL, + .psc_rep_portal = OSC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD }; m->mdt_dtss_service = ptlrpc_init_svc_conf(&conf, mdt_dtss_handle, LUSTRE_MDT_NAME"_dtss", - procfs_entry, NULL, "mdt_dtss"); + procfs_entry, target_print_req,"mdt_dtss"); if (!m->mdt_dtss_service) { CERROR("failed to start data seq server service\n"); GOTO(err_mdt_svc, rc = -ENOMEM); @@ -3417,22 +4048,22 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) /* FLD service start */ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = FLD_MAXREQSIZE, - .psc_max_reply_size = FLD_MAXREPSIZE, - .psc_req_portal = FLD_REQUEST_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = FLD_NUM_THREADS, - .psc_max_threads = FLD_NUM_THREADS, - .psc_ctx_tags = LCT_DT_THREAD|LCT_MD_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = FLD_MAXREQSIZE, + .psc_max_reply_size = FLD_MAXREPSIZE, + .psc_req_portal = FLD_REQUEST_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_DT_THREAD|LCT_MD_THREAD }; m->mdt_fld_service = ptlrpc_init_svc_conf(&conf, mdt_fld_handle, LUSTRE_MDT_NAME"_fld", - procfs_entry, NULL, "mdt_fld"); + procfs_entry, target_print_req, "mdt_fld"); if (!m->mdt_fld_service) { CERROR("failed to start fld service\n"); 
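Each per-service block reinitializes the shared conf variable with a typeof() compound literal and designated initializers, which is why these hunks look so repetitive. A self-contained illustration of that C idiom; typeof is a GCC extension, and the field names below are invented:

/* Resetting a struct via a typeof() compound literal, as the patch does. */
#include <stdio.h>

struct svc_conf { int nbufs; int req_portal; int min_threads; int max_threads; };

int main(void)
{
        struct svc_conf conf;

        conf = (typeof(conf)) {         /* GCC extension */
                .nbufs       = 64,
                .req_portal  = 12,
                .min_threads = 3,
                .max_threads = 32,
        };
        printf("%d %d\n", conf.nbufs, conf.max_threads);
        return 0;
}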
GOTO(err_mdt_svc, rc = -ENOMEM); @@ -3447,24 +4078,24 @@ static int mdt_start_ptlrpc_service(struct mdt_device *m) * mds-mds requests be not blocked during recovery. */ conf = (typeof(conf)) { - .psc_nbufs = MDS_NBUFS, - .psc_bufsize = MDS_BUFSIZE, - .psc_max_req_size = MDS_MAXREQSIZE, - .psc_max_reply_size = MDS_MAXREPSIZE, - .psc_req_portal = MDS_MDS_PORTAL, - .psc_rep_portal = MDC_REPLY_PORTAL, - .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT, - .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS), - MDT_MAX_THREADS), - .psc_max_threads = MDT_MAX_THREADS, - .psc_ctx_tags = LCT_MD_THREAD + .psc_nbufs = MDS_NBUFS, + .psc_bufsize = MDS_BUFSIZE, + .psc_max_req_size = MDS_MAXREQSIZE, + .psc_max_reply_size = MDS_MAXREPSIZE, + .psc_req_portal = MDS_MDS_PORTAL, + .psc_rep_portal = MDC_REPLY_PORTAL, + .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR, + .psc_min_threads = mdt_min_threads, + .psc_max_threads = mdt_max_threads, + .psc_ctx_tags = LCT_MD_THREAD }; - m->mdt_xmds_service = ptlrpc_init_svc_conf(&conf, mdt_xmds_handle, - LUSTRE_MDT_NAME "_mds", - procfs_entry, NULL, "mdt_xmds"); + m->mdt_xmds_service = + ptlrpc_init_svc_conf(&conf, mdt_xmds_handle, + LUSTRE_MDT_NAME "_mds", + procfs_entry, target_print_req,"mdt_xmds"); if (m->mdt_xmds_service == NULL) { - CERROR("failed to start readpage service\n"); + CERROR("failed to start xmds service\n"); GOTO(err_mdt_svc, rc = -ENOMEM); } @@ -3483,8 +4114,7 @@ err_mdt_svc: static void mdt_stack_fini(const struct lu_env *env, struct mdt_device *m, struct lu_device *top) { - struct lu_device *d = top, *n; - struct obd_device *obd = m->mdt_md_dev.md_lu_dev.ld_obd; + struct obd_device *obd = mdt2obd_dev(m); struct lustre_cfg_bufs *bufs; struct lustre_cfg *lcfg; struct mdt_thread_info *info; @@ -3512,28 +4142,12 @@ static void mdt_stack_fini(const struct lu_env *env, top->ld_ops->ldo_process_config(env, top, lcfg); lustre_cfg_free(lcfg); - lu_site_purge(env, top->ld_site, ~0); - while (d != NULL) { - struct obd_type *type; - struct lu_device_type *ldt = d->ld_type; - - /* each fini() returns next device in stack of layers - * * so we can avoid the recursion */ - n = ldt->ldt_ops->ldto_device_fini(env, d); - lu_device_put(d); - ldt->ldt_ops->ldto_device_free(env, d); - type = ldt->ldt_obd_type; - type->typ_refcnt--; - class_put_type(type); - - /* switch to the next device in the layer */ - d = n; - } + lu_stack_fini(env, top); m->mdt_child = NULL; m->mdt_bottom = NULL; } -static struct lu_device *mdt_layer_setup(const struct lu_env *env, +static struct lu_device *mdt_layer_setup(struct lu_env *env, const char *typename, struct lu_device *child, struct lustre_cfg *cfg) @@ -3552,20 +4166,12 @@ static struct lu_device *mdt_layer_setup(const struct lu_env *env, GOTO(out, rc = -ENODEV); } - rc = lu_context_refill(&env->le_ctx); + rc = lu_env_refill((struct lu_env *)env); if (rc != 0) { - CERROR("Failure to refill context: '%d'\n", rc); + CERROR("Failure to refill session: '%d'\n", rc); GOTO(out_type, rc); } - if (env->le_ses != NULL) { - rc = lu_context_refill(env->le_ses); - if (rc != 0) { - CERROR("Failure to refill session: '%d'\n", rc); - GOTO(out_type, rc); - } - } - ldt = type->typ_lu; if (ldt == NULL) { CERROR("type: '%s'\n", typename); @@ -3589,6 +4195,7 @@ static struct lu_device *mdt_layer_setup(const struct lu_env *env, GOTO(out_alloc, rc); } lu_device_get(d); + lu_ref_add(&d->ld_reference, "lu-stack", &lu_site_init); RETURN(d); @@ -3601,12 +4208,15 @@ out: return ERR_PTR(rc); } -static int mdt_stack_init(const struct lu_env *env, - 
struct mdt_device *m, struct lustre_cfg *cfg) +static int mdt_stack_init(struct lu_env *env, + struct mdt_device *m, + struct lustre_cfg *cfg, + struct lustre_mount_info *lmi) { struct lu_device *d = &m->mdt_md_dev.md_lu_dev; struct lu_device *tmp; struct md_device *md; + struct lu_device *child_lu_dev; int rc; ENTRY; @@ -3641,7 +4251,15 @@ static int mdt_stack_init(const struct lu_env *env, /* process setup config */ tmp = &m->mdt_md_dev.md_lu_dev; rc = tmp->ld_ops->ldo_process_config(env, tmp, cfg); - GOTO(out, rc); + if (rc) + GOTO(out, rc); + + /* initialize local objects */ + child_lu_dev = &m->mdt_child->md_lu_dev; + + rc = child_lu_dev->ld_ops->ldo_prepare(env, + &m->mdt_md_dev.md_lu_dev, + child_lu_dev); out: /* fini from last known good lu_device */ if (rc) @@ -3650,75 +4268,164 @@ out: return rc; } +/** + * setup CONFIG_ORIG context, used to access local config log. + * this may need to be rewrite as part of llog rewrite for lu-api. + */ +static int mdt_obd_llog_setup(struct obd_device *obd, + struct lustre_sb_info *lsi) +{ + int rc; + + LASSERT(obd->obd_fsops == NULL); + + obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd)); + if (IS_ERR(obd->obd_fsops)) + return PTR_ERR(obd->obd_fsops); + + rc = fsfilt_setup(obd, lsi->lsi_srv_mnt->mnt_sb); + if (rc) { + fsfilt_put_ops(obd->obd_fsops); + return rc; + } + + OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt); + obd->obd_lvfs_ctxt.pwdmnt = lsi->lsi_srv_mnt; + obd->obd_lvfs_ctxt.pwd = lsi->lsi_srv_mnt->mnt_root; + obd->obd_lvfs_ctxt.fs = get_ds(); + + rc = llog_setup(obd, &obd->obd_olg, LLOG_CONFIG_ORIG_CTXT, obd, + 0, NULL, &llog_lvfs_ops); + if (rc) { + CERROR("llog_setup() failed: %d\n", rc); + fsfilt_put_ops(obd->obd_fsops); + } + + return rc; +} + +static void mdt_obd_llog_cleanup(struct obd_device *obd) +{ + struct llog_ctxt *ctxt; + + ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT); + if (ctxt) + llog_cleanup(ctxt); + + if (obd->obd_fsops) { + fsfilt_put_ops(obd->obd_fsops); + obd->obd_fsops = NULL; + } +} + static void mdt_fini(const struct lu_env *env, struct mdt_device *m) { - struct md_device *next = m->mdt_child; - struct lu_device *d = &m->mdt_md_dev.md_lu_dev; - struct lu_site *ls = d->ld_site; - struct obd_device *obd = m->mdt_md_dev.md_lu_dev.ld_obd; + struct md_device *next = m->mdt_child; + struct lu_device *d = &m->mdt_md_dev.md_lu_dev; + struct lu_site *ls = d->ld_site; + struct obd_device *obd = mdt2obd_dev(m); ENTRY; - ping_evictor_stop(); - target_recovery_fini(obd); - mdt_stop_ptlrpc_service(m); - - mdt_fs_cleanup(env, m); - upcall_cache_cleanup(m->mdt_rmtacl_cache); - m->mdt_rmtacl_cache = NULL; + ping_evictor_stop(); + mdt_stop_ptlrpc_service(m); + mdt_llog_ctxt_unclone(env, m, LLOG_CHANGELOG_ORIG_CTXT); + mdt_obd_llog_cleanup(obd); + obd_exports_barrier(obd); + obd_zombie_barrier(); +#ifdef HAVE_QUOTA_SUPPORT + next->md_ops->mdo_quota.mqo_cleanup(env, next); +#endif + lut_fini(env, &m->mdt_lut); + mdt_fs_cleanup(env, m); upcall_cache_cleanup(m->mdt_identity_cache); m->mdt_identity_cache = NULL; if (m->mdt_namespace != NULL) { - ldlm_namespace_free(m->mdt_namespace, d->ld_obd->obd_force); + ldlm_namespace_free(m->mdt_namespace, NULL, + d->ld_obd->obd_force); d->ld_obd->obd_namespace = m->mdt_namespace = NULL; } + cfs_free_nidlist(&m->mdt_nosquash_nids); + if (m->mdt_nosquash_str) { + OBD_FREE(m->mdt_nosquash_str, m->mdt_nosquash_strlen); + m->mdt_nosquash_str = NULL; + m->mdt_nosquash_strlen = 0; + } + mdt_seq_fini(env, m); mdt_seq_fini_cli(m); mdt_fld_fini(env, m); - mdt_procfs_fini(m); - 
ptlrpc_lprocfs_unregister_obd(d->ld_obd); - lprocfs_obd_cleanup(d->ld_obd); - - if (m->mdt_rootsquash_info) { - OBD_FREE_PTR(m->mdt_rootsquash_info); - m->mdt_rootsquash_info = NULL; - } + sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset); next->md_ops->mdo_init_capa_ctxt(env, next, 0, 0, 0, NULL); - del_timer(&m->mdt_ck_timer); + cfs_timer_disarm(&m->mdt_ck_timer); mdt_ck_thread_stop(m); - /* finish the stack */ + /* + * Finish the stack + */ mdt_stack_fini(env, m, md2lu_dev(m->mdt_child)); - if (ls) { - if (!list_empty(&ls->ls_lru) || ls->ls_total != 0) { - /* - * Uh-oh, objects still exist. - */ - static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR); + lprocfs_free_per_client_stats(obd); + lprocfs_free_obd_stats(obd); + mdt_procfs_fini(m); - lu_site_print(env, ls, &cookie, lu_cdebug_printer); - } + if (ls) { + struct md_site *mite; lu_site_fini(ls); - OBD_FREE_PTR(ls); + mite = lu_site2md(ls); + OBD_FREE_PTR(mite); d->ld_site = NULL; } - LASSERT(atomic_read(&d->ld_ref) == 0); - md_device_fini(&m->mdt_md_dev); + LASSERT(cfs_atomic_read(&d->ld_ref) == 0); EXIT; } +static int mdt_adapt_sptlrpc_conf(struct obd_device *obd, int initial) +{ + struct mdt_device *m = mdt_dev(obd->obd_lu_dev); + struct sptlrpc_rule_set tmp_rset; + int rc; + + sptlrpc_rule_set_init(&tmp_rset); + rc = sptlrpc_conf_target_get_rules(obd, &tmp_rset, initial); + if (rc) { + CERROR("mdt %s: failed get sptlrpc rules: %d\n", + obd->obd_name, rc); + return rc; + } + + sptlrpc_target_update_exp_flavor(obd, &tmp_rset); + + cfs_write_lock(&m->mdt_sptlrpc_lock); + sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset); + m->mdt_sptlrpc_rset = tmp_rset; + cfs_write_unlock(&m->mdt_sptlrpc_lock); + + return 0; +} + static void fsoptions_to_mdt_flags(struct mdt_device *m, char *options) { char *p = options; + m->mdt_opts.mo_mds_capa = 1; + m->mdt_opts.mo_oss_capa = 1; +#ifdef CONFIG_FS_POSIX_ACL + /* ACLs should be enabled by default (b=13829) */ + m->mdt_opts.mo_acl = 1; + LCONSOLE_INFO("Enabling ACL\n"); +#else + m->mdt_opts.mo_acl = 0; + LCONSOLE_INFO("Disabling ACL\n"); +#endif + if (!options) return; @@ -3737,22 +4444,15 @@ static void fsoptions_to_mdt_flags(struct mdt_device *m, char *options) (memcmp(options, "nouser_xattr", len) == 0)) { m->mdt_opts.mo_user_xattr = 0; LCONSOLE_INFO("Disabling user_xattr\n"); - } else if ((len == sizeof("acl") - 1) && - (memcmp(options, "acl", len) == 0)) { -#ifdef CONFIG_FS_POSIX_ACL - m->mdt_opts.mo_acl = 1; - LCONSOLE_INFO("Enabling ACL\n"); -#else - m->mdt_opts.mo_acl = 0; - CWARN("ignoring unsupported acl mount option\n"); - LCONSOLE_INFO("Disabling ACL\n"); -#endif } else if ((len == sizeof("noacl") - 1) && (memcmp(options, "noacl", len) == 0)) { m->mdt_opts.mo_acl = 0; LCONSOLE_INFO("Disabling ACL\n"); } + if (!*p) + break; + options = ++p; } } @@ -3762,30 +4462,49 @@ int mdt_postrecov(const struct lu_env *, struct mdt_device *); static int mdt_init0(const struct lu_env *env, struct mdt_device *m, struct lu_device_type *ldt, struct lustre_cfg *cfg) { - struct lprocfs_static_vars lvars; struct mdt_thread_info *info; struct obd_device *obd; const char *dev = lustre_cfg_string(cfg, 0); const char *num = lustre_cfg_string(cfg, 2); - struct lustre_mount_info *lmi; + struct lustre_mount_info *lmi = NULL; struct lustre_sb_info *lsi; + struct lustre_disk_data *ldd; struct lu_site *s; + struct md_site *mite; + const char *identity_upcall = "NONE"; +#ifdef HAVE_QUOTA_SUPPORT + struct md_device *next; +#endif int rc; + int node_id; ENTRY; + md_device_init(&m->mdt_md_dev, ldt); + /* + * Environment (env) might 
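fsoptions_to_mdt_flags() above now enables ACLs by default (b=13829) and only scans the mount-option string for overrides such as nouser_xattr and noacl. A compilable sketch of that comma-separated scan; struct toy_opts and the option set handled are illustrative only:

/* Standalone mount-option scan modelled on fsoptions_to_mdt_flags(). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_opts { bool user_xattr; bool acl; };

static void parse_fsoptions(struct toy_opts *o, const char *options)
{
        o->acl = true;                  /* ACLs on by default */
        if (options == NULL)
                return;
        while (*options) {
                const char *p = strchr(options, ',');
                size_t len = p ? (size_t)(p - options) : strlen(options);

                if (len == strlen("user_xattr") &&
                    memcmp(options, "user_xattr", len) == 0)
                        o->user_xattr = true;
                else if (len == strlen("nouser_xattr") &&
                         memcmp(options, "nouser_xattr", len) == 0)
                        o->user_xattr = false;
                else if (len == strlen("noacl") &&
                         memcmp(options, "noacl", len) == 0)
                        o->acl = false;

                if (p == NULL)
                        break;
                options = p + 1;
        }
}

int main(void)
{
        struct toy_opts o = { 0 };

        parse_fsoptions(&o, "user_xattr,noacl");
        printf("user_xattr=%d acl=%d\n", o.user_xattr, o.acl);
        return 0;
}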
be missing mdt_thread_key values at that + * point, if device is allocated when mdt_thread_key is in QUIESCENT + * mode. + * + * Usually device allocation path doesn't use module key values, but + * mdt has to do a lot of work here, so allocate key value. + */ + rc = lu_env_refill((struct lu_env *)env); + if (rc != 0) + RETURN(rc); + info = lu_context_key_get(&env->le_ctx, &mdt_thread_key); LASSERT(info != NULL); obd = class_name2obd(dev); LASSERT(obd != NULL); - spin_lock_init(&m->mdt_transno_lock); - m->mdt_max_mdsize = MAX_MD_SIZE; m->mdt_max_cookiesize = sizeof(struct llog_cookie); + m->mdt_som_conf = 0; m->mdt_opts.mo_user_xattr = 0; m->mdt_opts.mo_acl = 0; + m->mdt_opts.mo_cos = MDT_COS_DEFAULT; lmi = server_get_mount_2(dev); if (lmi == NULL) { CERROR("Cannot get mount info for %s!\n", dev); @@ -3793,22 +4512,45 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, } else { lsi = s2lsi(lmi->lmi_sb); fsoptions_to_mdt_flags(m, lsi->lsi_lmd->lmd_opts); - server_put_mount_2(dev, lmi->lmi_mnt); + /* CMD is supported only in IAM mode */ + ldd = lsi->lsi_ldd; + LASSERT(num); + node_id = simple_strtol(num, NULL, 10); + if (!(ldd->ldd_flags & LDD_F_IAM_DIR) && node_id) { + CERROR("CMD Operation not allowed in IOP mode\n"); + GOTO(err_lmi, rc = -EINVAL); + } + /* Read recovery timeouts */ + if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_soft) + obd->obd_recovery_timeout = + lsi->lsi_lmd->lmd_recovery_time_soft; + + if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_hard) + obd->obd_recovery_time_hard = + lsi->lsi_lmd->lmd_recovery_time_hard; } - spin_lock_init(&m->mdt_ioepoch_lock); + cfs_rwlock_init(&m->mdt_sptlrpc_lock); + sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset); + + cfs_spin_lock_init(&m->mdt_ioepoch_lock); m->mdt_opts.mo_compat_resname = 0; m->mdt_capa_timeout = CAPA_TIMEOUT; m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1; m->mdt_ck_timeout = CAPA_KEY_TIMEOUT; + m->mdt_squash_uid = 0; + m->mdt_squash_gid = 0; + CFS_INIT_LIST_HEAD(&m->mdt_nosquash_nids); + m->mdt_nosquash_str = NULL; + m->mdt_nosquash_strlen = 0; + cfs_init_rwsem(&m->mdt_squash_sem); - spin_lock_init(&m->mdt_client_bitmap_lock); + OBD_ALLOC_PTR(mite); + if (mite == NULL) + GOTO(err_lmi, rc = -ENOMEM); - OBD_ALLOC_PTR(s); - if (s == NULL) - RETURN(-ENOMEM); + s = &mite->ms_lu; - md_device_init(&m->mdt_md_dev, ldt); m->mdt_md_dev.md_lu_dev.ld_ops = &mdt_lu_ops; m->mdt_md_dev.md_lu_dev.ld_obd = obd; /* set this lu_device to obd, because error handling need it */ @@ -3820,14 +4562,6 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, GOTO(err_free_site, rc); } - lprocfs_init_vars(mdt, &lvars); - rc = lprocfs_obd_setup(obd, lvars.obd_vars); - if (rc) { - CERROR("Can't init lprocfs, rc %d\n", rc); - GOTO(err_fini_site, rc); - } - ptlrpc_lprocfs_register_obd(obd); - rc = mdt_procfs_init(m, dev); if (rc) { CERROR("Can't init MDT lprocfs, rc %d\n", rc); @@ -3835,12 +4569,11 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, } /* set server index */ - LASSERT(num); - s->ls_node_id = simple_strtol(num, NULL, 10); + lu_site2md(s)->ms_node_id = node_id; /* failover is the default * FIXME: we do not failout mds0/mgs, which may cause some problems. 
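mdt_init0() above gains a guard that parses the server index from the configuration and refuses a non-zero index (that is, a CMD setup) unless the backing filesystem was formatted with IAM directories. A minimal standalone version of that check, with an invented flag value:

/* Sketch of the CMD/IOP guard on the MDT node index. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_LDD_F_IAM_DIR 0x40          /* invented stand-in for the ldd flag */

static int check_node_id(const char *num, unsigned int ldd_flags)
{
        long node_id = strtol(num, NULL, 10);

        if (!(ldd_flags & TOY_LDD_F_IAM_DIR) && node_id != 0)
                return -EINVAL;         /* CMD not allowed in IOP mode */
        return 0;
}

int main(void)
{
        printf("%d\n", check_node_id("0", 0));                  /* 0 */
        printf("%d\n", check_node_id("2", 0));                  /* -EINVAL */
        printf("%d\n", check_node_id("2", TOY_LDD_F_IAM_DIR));  /* 0 */
        return 0;
}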
- * assumed whose ls_node_id == 0 XXX + * assumed whose ms_node_id == 0 XXX * */ obd->obd_replayable = 1; /* No connection accepted until configurations will finish */ @@ -3855,24 +4588,29 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, } /* init the stack */ - rc = mdt_stack_init(env, m, cfg); + rc = mdt_stack_init((struct lu_env *)env, m, cfg, lmi); if (rc) { CERROR("Can't init device stack, rc %d\n", rc); GOTO(err_fini_proc, rc); } - rc = mdt_fld_init(env, obd->obd_name, m); + rc = lut_init(env, &m->mdt_lut, obd, m->mdt_bottom); if (rc) GOTO(err_fini_stack, rc); + rc = mdt_fld_init(env, obd->obd_name, m); + if (rc) + GOTO(err_lut, rc); + rc = mdt_seq_init(env, obd->obd_name, m); if (rc) GOTO(err_fini_fld, rc); snprintf(info->mti_u.ns_name, sizeof info->mti_u.ns_name, LUSTRE_MDT_NAME"-%p", m); - m->mdt_namespace = ldlm_namespace_new(info->mti_u.ns_name, - LDLM_NAMESPACE_SERVER); + m->mdt_namespace = ldlm_namespace_new(obd, info->mti_u.ns_name, + LDLM_NAMESPACE_SERVER, + LDLM_NAMESPACE_GREEDY); if (m->mdt_namespace == NULL) GOTO(err_fini_seq, rc = -ENOMEM); @@ -3880,8 +4618,12 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, /* set obd_namespace for compatibility with old code */ obd->obd_namespace = m->mdt_namespace; - m->mdt_identity_cache = upcall_cache_init(obd->obd_name, - "NONE", + /* XXX: to support suppgid for ACL, we enable identity_upcall + * by default, otherwise, maybe got unexpected -EACCESS. */ + if (m->mdt_opts.mo_acl) + identity_upcall = MDT_IDENTITY_UPCALL_PATH; + + m->mdt_identity_cache = upcall_cache_init(obd->obd_name, identity_upcall, &mdt_identity_upcall_cache_ops); if (IS_ERR(m->mdt_identity_cache)) { rc = PTR_ERR(m->mdt_identity_cache); @@ -3889,31 +4631,41 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, GOTO(err_free_ns, rc); } - m->mdt_rmtacl_cache = upcall_cache_init(obd->obd_name, - MDT_RMTACL_UPCALL_PATH, - &mdt_rmtacl_upcall_cache_ops); - if (IS_ERR(m->mdt_rmtacl_cache)) { - rc = PTR_ERR(m->mdt_rmtacl_cache); - m->mdt_rmtacl_cache = NULL; - GOTO(err_free_ns, rc); - } + cfs_timer_init(&m->mdt_ck_timer, mdt_ck_timer_callback, m); - m->mdt_ck_timer.function = mdt_ck_timer_callback; - m->mdt_ck_timer.data = (unsigned long)m; - init_timer(&m->mdt_ck_timer); rc = mdt_ck_thread_start(m); if (rc) GOTO(err_free_ns, rc); - rc = mdt_fs_setup(env, m, obd); + rc = mdt_fs_setup(env, m, obd, lsi); if (rc) GOTO(err_capa, rc); - target_recovery_init(obd, mdt_recovery_handle); + rc = mdt_obd_llog_setup(obd, lsi); + if (rc) + GOTO(err_fs_cleanup, rc); + + rc = mdt_llog_ctxt_clone(env, m, LLOG_CHANGELOG_ORIG_CTXT); + if (rc) + GOTO(err_llog_cleanup, rc); + + mdt_adapt_sptlrpc_conf(obd, 1); + +#ifdef HAVE_QUOTA_SUPPORT + next = m->mdt_child; + rc = next->md_ops->mdo_quota.mqo_setup(env, next, lmi->lmi_mnt); + if (rc) + GOTO(err_llog_cleanup, rc); +#endif + + server_put_mount_2(dev, lmi->lmi_mnt); + lmi = NULL; + + target_recovery_init(&m->mdt_lut, mdt_recovery_handle); rc = mdt_start_ptlrpc_service(m); if (rc) - GOTO(err_fs_cleanup, rc); + GOTO(err_recovery, rc); ping_evictor_start(); @@ -3926,42 +4678,51 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, mdt_init_capa_ctxt(env, m); + /* Reduce the initial timeout on an MDS because it doesn't need such + * a long timeout as an OST does. Adaptive timeouts will adjust this + * value appropriately. 
*/ if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT) - ldlm_timeout = 6; + ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT; RETURN(0); err_stop_service: ping_evictor_stop(); mdt_stop_ptlrpc_service(m); -err_fs_cleanup: +err_recovery: target_recovery_fini(obd); +#ifdef HAVE_QUOTA_SUPPORT + next->md_ops->mdo_quota.mqo_cleanup(env, next); +#endif +err_llog_cleanup: + mdt_llog_ctxt_unclone(env, m, LLOG_CHANGELOG_ORIG_CTXT); + mdt_obd_llog_cleanup(obd); +err_fs_cleanup: mdt_fs_cleanup(env, m); err_capa: - del_timer(&m->mdt_ck_timer); + cfs_timer_disarm(&m->mdt_ck_timer); mdt_ck_thread_stop(m); err_free_ns: - upcall_cache_cleanup(m->mdt_rmtacl_cache); - m->mdt_rmtacl_cache = NULL; upcall_cache_cleanup(m->mdt_identity_cache); m->mdt_identity_cache = NULL; - ldlm_namespace_free(m->mdt_namespace, 0); + ldlm_namespace_free(m->mdt_namespace, NULL, 0); obd->obd_namespace = m->mdt_namespace = NULL; err_fini_seq: mdt_seq_fini(env, m); err_fini_fld: mdt_fld_fini(env, m); +err_lut: + lut_fini(env, &m->mdt_lut); err_fini_stack: mdt_stack_fini(env, m, md2lu_dev(m->mdt_child)); err_fini_proc: mdt_procfs_fini(m); - lprocfs_obd_cleanup(obd); -err_fini_site: lu_site_fini(s); err_free_site: - OBD_FREE_PTR(s); - - md_device_fini(&m->mdt_md_dev); + OBD_FREE_PTR(mite); +err_lmi: + if (lmi) + server_put_mount_2(dev, lmi->lmi_mnt); return (rc); } @@ -3980,10 +4741,24 @@ static int mdt_process_config(const struct lu_env *env, struct lprocfs_static_vars lvars; struct obd_device *obd = d->ld_obd; - lprocfs_init_vars(mdt, &lvars); - rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, cfg, obd); - if (rc) - /* others are passed further */ + /* + * For interoperability between 1.8 and 2.0, + * skip old "mdt.group_upcall" param. + */ + { + char *param = lustre_cfg_string(cfg, 1); + if (param && !strncmp("mdt.group_upcall", param, 16)) { + CWARN("For 1.8 interoperability, skip this" + " mdt.group_upcall. 
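mdt_process_config() now recognizes the obsolete 1.8 parameter mdt.group_upcall by prefix and skips it for interoperability rather than handing it down the device stack. The prefix test, reduced to a standalone helper:

/* Tiny model of the 1.8-interop parameter filter. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool param_is_obsolete(const char *param)
{
        return param != NULL &&
               strncmp(param, "mdt.group_upcall", 16) == 0;
}

int main(void)
{
        printf("%d\n", param_is_obsolete("mdt.group_upcall=NONE"));   /* 1 */
        printf("%d\n", param_is_obsolete("mdt.identity_upcall=x"));   /* 0 */
        return 0;
}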
It is obsolete\n"); + break; + } + } + + lprocfs_mdt_init_vars(&lvars); + rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, + cfg, obd); + if (rc > 0 || rc == -ENOSYS) + /* we don't understand; pass it on */ rc = next->ld_ops->ldo_process_config(env, next, cfg); break; } @@ -4025,12 +4800,14 @@ static struct lu_object *mdt_object_alloc(const struct lu_env *env, lu_object_init(o, h, d); lu_object_add_top(h, o); o->lo_ops = &mdt_obj_ops; + cfs_sema_init(&mo->mot_ioepoch_sem, 1); RETURN(o); } else RETURN(NULL); } -static int mdt_object_init(const struct lu_env *env, struct lu_object *o) +static int mdt_object_init(const struct lu_env *env, struct lu_object *o, + const struct lu_object_conf *unused) { struct mdt_device *d = mdt_dev(o->lo_dev); struct lu_device *under; @@ -4070,27 +4847,48 @@ static void mdt_object_free(const struct lu_env *env, struct lu_object *o) static int mdt_object_print(const struct lu_env *env, void *cookie, lu_printer_t p, const struct lu_object *o) { - return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p", o); + struct mdt_object *mdto = mdt_obj((struct lu_object *)o); + return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p(ioepoch="LPU64" " + "flags="LPX64", epochcount=%d, writecount=%d)", + mdto, mdto->mot_ioepoch, mdto->mot_flags, + mdto->mot_ioepoch_count, mdto->mot_writecount); } -static struct lu_device_operations mdt_lu_ops = { +static const struct lu_device_operations mdt_lu_ops = { .ldo_object_alloc = mdt_object_alloc, - .ldo_process_config = mdt_process_config + .ldo_process_config = mdt_process_config, }; -static struct lu_object_operations mdt_obj_ops = { +static const struct lu_object_operations mdt_obj_ops = { .loo_object_init = mdt_object_init, .loo_object_free = mdt_object_free, .loo_object_print = mdt_object_print }; +static int mdt_obd_set_info_async(struct obd_export *exp, + __u32 keylen, void *key, + __u32 vallen, void *val, + struct ptlrpc_request_set *set) +{ + struct obd_device *obd = exp->exp_obd; + int rc; + ENTRY; + + LASSERT(obd); + + if (KEY_IS(KEY_SPTLRPC_CONF)) { + rc = mdt_adapt_sptlrpc_conf(obd, 0); + RETURN(rc); + } + + RETURN(0); +} + /* mds_connect_internal */ static int mdt_connect_internal(struct obd_export *exp, struct mdt_device *mdt, struct obd_connect_data *data) { - __u64 flags; - if (data != NULL) { data->ocd_connect_flags &= MDT_CONNECT_SUPPORTED; data->ocd_ibits_known &= MDS_INODELOCK_FULL; @@ -4108,15 +4906,12 @@ static int mdt_connect_internal(struct obd_export *exp, if (!mdt->mdt_opts.mo_user_xattr) data->ocd_connect_flags &= ~OBD_CONNECT_XATTR; - if (!mdt->mdt_opts.mo_mds_capa) - data->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA; - - if (!mdt->mdt_opts.mo_oss_capa) - data->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA; + if (!mdt->mdt_som_conf) + data->ocd_connect_flags &= ~OBD_CONNECT_SOM; - spin_lock(&exp->exp_lock); + cfs_spin_lock(&exp->exp_lock); exp->exp_connect_flags = data->ocd_connect_flags; - spin_unlock(&exp->exp_lock); + cfs_spin_unlock(&exp->exp_lock); data->ocd_version = LUSTRE_VERSION_CODE; exp->exp_mdt_data.med_ibits_known = data->ocd_ibits_known; } @@ -4130,128 +4925,249 @@ static int mdt_connect_internal(struct obd_export *exp, } #endif - flags = OBD_CONNECT_LCL_CLIENT | OBD_CONNECT_RMT_CLIENT; - if ((exp->exp_connect_flags & flags) == flags) { - CWARN("%s: both local and remote client flags are set\n", + if ((exp->exp_connect_flags & OBD_CONNECT_FID) == 0) { + CWARN("%s: MDS requires FID support, but client not\n", mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name); return -EBADE; } - if 
(mdt->mdt_opts.mo_mds_capa && - ((exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) == 0)) { - CWARN("%s: MDS requires capability support, but client not\n", - mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name); + if (mdt->mdt_som_conf && !exp_connect_som(exp) && + !(exp->exp_connect_flags & OBD_CONNECT_MDS_MDS)) { + CWARN("%s: MDS has SOM enabled, but client does not support " + "it\n", mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name); return -EBADE; } - if (mdt->mdt_opts.mo_oss_capa && - ((exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA) == 0)) { - CWARN("%s: MDS requires OSS capability support, " - "but client not\n", - mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name); - return -EBADE; + return 0; +} + +static int mdt_connect_check_sptlrpc(struct mdt_device *mdt, + struct obd_export *exp, + struct ptlrpc_request *req) +{ + struct sptlrpc_flavor flvr; + int rc = 0; + + if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) { + cfs_read_lock(&mdt->mdt_sptlrpc_lock); + sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset, + req->rq_sp_from, + req->rq_peer.nid, + &flvr); + cfs_read_unlock(&mdt->mdt_sptlrpc_lock); + + cfs_spin_lock(&exp->exp_lock); + + exp->exp_sp_peer = req->rq_sp_from; + exp->exp_flvr = flvr; + + if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY && + exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) { + CERROR("unauthorized rpc flavor %x from %s, " + "expect %x\n", req->rq_flvr.sf_rpc, + libcfs_nid2str(req->rq_peer.nid), + exp->exp_flvr.sf_rpc); + rc = -EACCES; + } + + cfs_spin_unlock(&exp->exp_lock); + } else { + if (exp->exp_sp_peer != req->rq_sp_from) { + CERROR("RPC source %s doesn't match %s\n", + sptlrpc_part2name(req->rq_sp_from), + sptlrpc_part2name(exp->exp_sp_peer)); + rc = -EACCES; + } else { + rc = sptlrpc_target_export_check(exp, req); + } } - return 0; + return rc; } /* mds_connect copy */ static int mdt_obd_connect(const struct lu_env *env, - struct lustre_handle *conn, struct obd_device *obd, + struct obd_export **exp, struct obd_device *obd, struct obd_uuid *cluuid, - struct obd_connect_data *data) + struct obd_connect_data *data, + void *localdata) { - struct mdt_client_data *mcd; - struct obd_export *exp; + struct mdt_thread_info *info; + struct obd_export *lexp; + struct lustre_handle conn = { 0 }; struct mdt_device *mdt; + struct ptlrpc_request *req; int rc; ENTRY; LASSERT(env != NULL); - if (!conn || !obd || !cluuid) + if (!exp || !obd || !cluuid) RETURN(-EINVAL); + info = lu_context_key_get(&env->le_ctx, &mdt_thread_key); + req = info->mti_pill->rc_req; mdt = mdt_dev(obd->obd_lu_dev); - rc = class_connect(conn, obd, cluuid); + rc = class_connect(&conn, obd, cluuid); if (rc) RETURN(rc); - exp = class_conn2export(conn); - LASSERT(exp != NULL); + lexp = class_conn2export(&conn); + LASSERT(lexp != NULL); + + rc = mdt_connect_check_sptlrpc(mdt, lexp, req); + if (rc) + GOTO(out, rc); - rc = mdt_connect_internal(exp, mdt, data); + rc = mdt_connect_internal(lexp, mdt, data); if (rc == 0) { - OBD_ALLOC_PTR(mcd); - if (mcd != NULL) { - struct mdt_thread_info *mti; - mti = lu_context_key_get(&env->le_ctx, - &mdt_thread_key); - LASSERT(mti != NULL); - mti->mti_exp = exp; - memcpy(mcd->mcd_uuid, cluuid, sizeof mcd->mcd_uuid); - exp->exp_mdt_data.med_mcd = mcd; - rc = mdt_client_new(env, mdt); - if (rc != 0) { - OBD_FREE_PTR(mcd); - exp->exp_mdt_data.med_mcd = NULL; - } - } else - rc = -ENOMEM; + struct mdt_thread_info *mti; + struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd; + LASSERT(lcd); + mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key); + LASSERT(mti != NULL); + mti->mti_exp = 
lexp; + memcpy(lcd->lcd_uuid, cluuid, sizeof lcd->lcd_uuid); + rc = mdt_client_new(env, mdt); + if (rc == 0) + mdt_export_stats_init(obd, lexp, 0, localdata); } - if (rc != 0) - class_disconnect(exp); - else - class_export_put(exp); +out: + if (rc != 0) { + class_disconnect(lexp); + *exp = NULL; + } else { + *exp = lexp; + } RETURN(rc); } -static int mdt_obd_reconnect(struct obd_export *exp, struct obd_device *obd, +static int mdt_obd_reconnect(const struct lu_env *env, + struct obd_export *exp, struct obd_device *obd, struct obd_uuid *cluuid, - struct obd_connect_data *data) + struct obd_connect_data *data, + void *localdata) { - int rc; + struct mdt_thread_info *info; + struct mdt_device *mdt; + struct ptlrpc_request *req; + int rc; ENTRY; if (exp == NULL || obd == NULL || cluuid == NULL) RETURN(-EINVAL); + info = lu_context_key_get(&env->le_ctx, &mdt_thread_key); + req = info->mti_pill->rc_req; + mdt = mdt_dev(obd->obd_lu_dev); + + rc = mdt_connect_check_sptlrpc(mdt, exp, req); + if (rc) + RETURN(rc); + rc = mdt_connect_internal(exp, mdt_dev(obd->obd_lu_dev), data); + if (rc == 0) + mdt_export_stats_init(obd, exp, 1, localdata); + + RETURN(rc); +} +static int mdt_export_cleanup(struct obd_export *exp) +{ + struct mdt_export_data *med = &exp->exp_mdt_data; + struct obd_device *obd = exp->exp_obd; + struct mdt_device *mdt; + struct mdt_thread_info *info; + struct lu_env env; + CFS_LIST_HEAD(closing_list); + struct mdt_file_data *mfd, *n; + int rc = 0; + ENTRY; + + cfs_spin_lock(&med->med_open_lock); + while (!cfs_list_empty(&med->med_open_head)) { + cfs_list_t *tmp = med->med_open_head.next; + mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list); + + /* Remove mfd handle so it can't be found again. + * We are consuming the mfd_list reference here. */ + class_handle_unhash(&mfd->mfd_handle); + cfs_list_move_tail(&mfd->mfd_list, &closing_list); + } + cfs_spin_unlock(&med->med_open_lock); + mdt = mdt_dev(obd->obd_lu_dev); + LASSERT(mdt != NULL); + + rc = lu_env_init(&env, LCT_MD_THREAD); + if (rc) + RETURN(rc); + + info = lu_context_key_get(&env.le_ctx, &mdt_thread_key); + LASSERT(info != NULL); + memset(info, 0, sizeof *info); + info->mti_env = &env; + info->mti_mdt = mdt; + info->mti_exp = exp; + + if (!cfs_list_empty(&closing_list)) { + struct md_attr *ma = &info->mti_attr; + int lmm_size; + int cookie_size; + + lmm_size = mdt->mdt_max_mdsize; + OBD_ALLOC(ma->ma_lmm, lmm_size); + if (ma->ma_lmm == NULL) + GOTO(out_lmm, rc = -ENOMEM); + + cookie_size = mdt->mdt_max_cookiesize; + OBD_ALLOC(ma->ma_cookie, cookie_size); + if (ma->ma_cookie == NULL) + GOTO(out_cookie, rc = -ENOMEM); + + /* Close any open files (which may also cause orphan unlinking). */ + cfs_list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) { + cfs_list_del_init(&mfd->mfd_list); + memset(&ma->ma_attr, 0, sizeof(ma->ma_attr)); + ma->ma_lmm_size = lmm_size; + ma->ma_cookie_size = cookie_size; + ma->ma_need = 0; + /* It is not for setattr, just tell MDD to send + * DESTROY RPC to OSS if needed */ + ma->ma_attr_flags = MDS_CLOSE_CLEANUP; + ma->ma_valid = MA_FLAGS; + mdt_mfd_close(info, mfd); + } + OBD_FREE(ma->ma_cookie, cookie_size); + ma->ma_cookie = NULL; +out_cookie: + OBD_FREE(ma->ma_lmm, lmm_size); + ma->ma_lmm = NULL; + } +out_lmm: + info->mti_mdt = NULL; + /* cleanup client slot early */ + /* Do not erase record for recoverable client. 
*/ + if (!obd->obd_fail || exp->exp_failed) + mdt_client_del(&env, mdt); + lu_env_fini(&env); RETURN(rc); } static int mdt_obd_disconnect(struct obd_export *exp) { - struct mdt_device *mdt = mdt_dev(exp->exp_obd->obd_lu_dev); int rc; ENTRY; LASSERT(exp); class_export_get(exp); - /* Disconnect early so that clients can't keep using export */ - rc = class_disconnect(exp); - if (mdt->mdt_namespace != NULL || exp->exp_obd->obd_namespace != NULL) - ldlm_cancel_locks_for_export(exp); - - /* complete all outstanding replies */ - spin_lock(&exp->exp_lock); - while (!list_empty(&exp->exp_outstanding_replies)) { - struct ptlrpc_reply_state *rs = - list_entry(exp->exp_outstanding_replies.next, - struct ptlrpc_reply_state, rs_exp_list); - struct ptlrpc_service *svc = rs->rs_service; - - spin_lock(&svc->srv_lock); - list_del_init(&rs->rs_exp_list); - ptlrpc_schedule_difficult_reply(rs); - spin_unlock(&svc->srv_lock); - } - spin_unlock(&exp->exp_lock); + rc = server_disconnect_export(exp); + if (rc != 0) + CDEBUG(D_IOCTL, "server disconnect error: %d\n", rc); + rc = mdt_export_cleanup(exp); class_export_put(exp); RETURN(rc); } @@ -4260,117 +5176,71 @@ static int mdt_obd_disconnect(struct obd_export *exp) static int mdt_init_export(struct obd_export *exp) { struct mdt_export_data *med = &exp->exp_mdt_data; + int rc; ENTRY; - INIT_LIST_HEAD(&med->med_open_head); - spin_lock_init(&med->med_open_lock); - spin_lock(&exp->exp_lock); + CFS_INIT_LIST_HEAD(&med->med_open_head); + cfs_spin_lock_init(&med->med_open_lock); + cfs_sema_init(&med->med_idmap_sem, 1); + med->med_idmap = NULL; + cfs_spin_lock(&exp->exp_lock); exp->exp_connecting = 1; - spin_unlock(&exp->exp_lock); - RETURN(0); + cfs_spin_unlock(&exp->exp_lock); + rc = lut_client_alloc(exp); + if (rc == 0) + rc = ldlm_init_export(exp); + + if (rc) + CERROR("Error %d while initializing export\n", rc); + RETURN(rc); } -static int mdt_destroy_export(struct obd_export *export) +static int mdt_destroy_export(struct obd_export *exp) { struct mdt_export_data *med; - struct obd_device *obd = export->exp_obd; - struct mdt_device *mdt; - struct mdt_thread_info *info; - struct lu_env env; - struct md_attr *ma; - int lmm_size; - int cookie_size; int rc = 0; ENTRY; - med = &export->exp_mdt_data; - if (med->med_rmtclient) - mdt_cleanup_idmap(med); + med = &exp->exp_mdt_data; + if (exp_connect_rmtclient(exp)) + mdt_cleanup_idmap(&exp->exp_mdt_data); - target_destroy_export(export); + target_destroy_export(exp); + ldlm_destroy_export(exp); + lut_client_free(exp); - if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid)) + LASSERT(cfs_list_empty(&exp->exp_outstanding_replies)); + LASSERT(cfs_list_empty(&exp->exp_mdt_data.med_open_head)); + if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid)) RETURN(0); - mdt = mdt_dev(obd->obd_lu_dev); - LASSERT(mdt != NULL); - - rc = lu_env_init(&env, NULL, LCT_MD_THREAD); - if (rc) - RETURN(rc); - - info = lu_context_key_get(&env.le_ctx, &mdt_thread_key); - LASSERT(info != NULL); - memset(info, 0, sizeof *info); - info->mti_env = &env; - info->mti_mdt = mdt; - info->mti_exp = export; - - ma = &info->mti_attr; - lmm_size = ma->ma_lmm_size = mdt->mdt_max_mdsize; - cookie_size = ma->ma_cookie_size = mdt->mdt_max_cookiesize; - OBD_ALLOC(ma->ma_lmm, lmm_size); - OBD_ALLOC(ma->ma_cookie, cookie_size); - - if (ma->ma_lmm == NULL || ma->ma_cookie == NULL) - GOTO(out, rc = -ENOMEM); - ma->ma_need = MA_LOV | MA_COOKIE; - ma->ma_valid = 0; - /* Close any open files (which may also cause orphan unlinking). 
*/ - spin_lock(&med->med_open_lock); - while (!list_empty(&med->med_open_head)) { - struct list_head *tmp = med->med_open_head.next; - struct mdt_file_data *mfd = - list_entry(tmp, struct mdt_file_data, mfd_list); - - /* Remove mfd handle so it can't be found again. - * We are consuming the mfd_list reference here. */ - class_handle_unhash(&mfd->mfd_handle); - list_del_init(&mfd->mfd_list); - spin_unlock(&med->med_open_lock); - mdt_mfd_close(info, mfd); - /* TODO: if we close the unlinked file, - * we need to remove it's objects from OST */ - memset(&ma->ma_attr, 0, sizeof(ma->ma_attr)); - spin_lock(&med->med_open_lock); - ma->ma_lmm_size = lmm_size; - ma->ma_cookie_size = cookie_size; - ma->ma_need = MA_LOV | MA_COOKIE; - ma->ma_valid = 0; - } - spin_unlock(&med->med_open_lock); - info->mti_mdt = NULL; - mdt_client_del(&env, mdt); - - EXIT; -out: - if (lmm_size) { - OBD_FREE(ma->ma_lmm, lmm_size); - ma->ma_lmm = NULL; - } - if (cookie_size) { - OBD_FREE(ma->ma_cookie, cookie_size); - ma->ma_cookie = NULL; - } - lu_env_fini(&env); - - return rc; + RETURN(rc); } static void mdt_allow_cli(struct mdt_device *m, unsigned int flag) { if (flag & CONFIG_LOG) - m->mdt_fl_cfglog = 1; + cfs_set_bit(MDT_FL_CFGLOG, &m->mdt_state); + + /* also notify active event */ if (flag & CONFIG_SYNC) - m->mdt_fl_synced = 1; + cfs_set_bit(MDT_FL_SYNCED, &m->mdt_state); - if (m->mdt_fl_cfglog && m->mdt_fl_synced) + if (cfs_test_bit(MDT_FL_CFGLOG, &m->mdt_state) && + cfs_test_bit(MDT_FL_SYNCED, &m->mdt_state)) { + struct obd_device *obd = m->mdt_md_dev.md_lu_dev.ld_obd; + /* Open for clients */ - m->mdt_md_dev.md_lu_dev.ld_obd->obd_no_conn = 0; + if (obd->obd_no_conn) { + cfs_spin_lock_bh(&obd->obd_processing_task_lock); + obd->obd_no_conn = 0; + cfs_spin_unlock_bh(&obd->obd_processing_task_lock); + } + } } static int mdt_upcall(const struct lu_env *env, struct md_device *md, - enum md_upcall_event ev) + enum md_upcall_event ev, void *data) { struct mdt_device *m = mdt_dev(&md->md_lu_dev); struct md_device *next = m->mdt_child; @@ -4386,6 +5256,9 @@ static int mdt_upcall(const struct lu_env *env, struct md_device *md, CDEBUG(D_INFO, "get max mdsize %d max cookiesize %d\n", m->mdt_max_mdsize, m->mdt_max_cookiesize); mdt_allow_cli(m, CONFIG_SYNC); + if (data) + (*(__u64 *)data) = + m->mdt_lut.lut_obd->u.obt.obt_mount_count; break; case MD_NO_TRANS: mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key); @@ -4394,8 +5267,15 @@ static int mdt_upcall(const struct lu_env *env, struct md_device *md, break; case MD_LOV_CONFIG: /* Check that MDT is not yet configured */ - LASSERT(!m->mdt_fl_cfglog); + LASSERT(!cfs_test_bit(MDT_FL_CFGLOG, &m->mdt_state)); break; +#ifdef HAVE_QUOTA_SUPPORT + case MD_LOV_QUOTA: + if (md->md_lu_dev.ld_obd->obd_recovering == 0 && + likely(md->md_lu_dev.ld_obd->obd_stopping == 0)) + next->md_ops->mdo_quota.mqo_recovery(env, next); + break; +#endif default: CERROR("invalid event\n"); rc = -EINVAL; @@ -4408,11 +5288,21 @@ static int mdt_obd_notify(struct obd_device *host, struct obd_device *watched, enum obd_notify_event ev, void *data) { + struct mdt_device *mdt = mdt_dev(host->obd_lu_dev); +#ifdef HAVE_QUOTA_SUPPORT + struct md_device *next = mdt->mdt_child; +#endif ENTRY; switch (ev) { case OBD_NOTIFY_CONFIG: - mdt_allow_cli(mdt_dev(host->obd_lu_dev), (unsigned int)data); + mdt_allow_cli(mdt, (unsigned long)data); + +#ifdef HAVE_QUOTA_SUPPORT + /* quota_type has been processed, we can now handle + * incoming quota requests */ + next->md_ops->mdo_quota.mqo_notify(NULL, next); +#endif break; 
        default:
                CDEBUG(D_INFO, "Unhandled notification %#x\n", ev);
@@ -4420,27 +5310,191 @@ static int mdt_obd_notify(struct obd_device *host,
        RETURN(0);
 }

+static int mdt_rpc_fid2path(struct mdt_thread_info *info, void *key,
+                            void *val, int vallen)
+{
+       struct mdt_device *mdt = mdt_dev(info->mti_exp->exp_obd->obd_lu_dev);
+       struct getinfo_fid2path *fpout, *fpin;
+       int rc = 0;
+
+       fpin = key + cfs_size_round(sizeof(KEY_FID2PATH));
+       fpout = val;
+
+       if (ptlrpc_req_need_swab(info->mti_pill->rc_req))
+               lustre_swab_fid2path(fpin);
+
+       memcpy(fpout, fpin, sizeof(*fpin));
+       if (fpout->gf_pathlen != vallen - sizeof(*fpin))
+               RETURN(-EINVAL);
+
+       rc = mdt_fid2path(info->mti_env, mdt, fpout);
+       RETURN(rc);
+}
+
+static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
+                       struct getinfo_fid2path *fp)
+{
+       struct mdt_object *obj;
+       int rc;
+       ENTRY;
+
+       CDEBUG(D_IOCTL, "path get "DFID" from "LPU64" #%d\n",
+              PFID(&fp->gf_fid), fp->gf_recno, fp->gf_linkno);
+
+       if (!fid_is_sane(&fp->gf_fid))
+               RETURN(-EINVAL);
+
+       obj = mdt_object_find(env, mdt, &fp->gf_fid);
+       if (obj == NULL || IS_ERR(obj)) {
+               CDEBUG(D_IOCTL, "no object "DFID": %ld\n",PFID(&fp->gf_fid),
+                      PTR_ERR(obj));
+               RETURN(-EINVAL);
+       }
+
+       rc = lu_object_exists(&obj->mot_obj.mo_lu);
+       if (rc <= 0) {
+               if (rc == -1)
+                       rc = -EREMOTE;
+               else
+                       rc = -ENOENT;
+               mdt_object_put(env, obj);
+               CDEBUG(D_IOCTL, "nonlocal object "DFID": %d\n",
+                      PFID(&fp->gf_fid), rc);
+               RETURN(rc);
+       }
+
+       rc = mo_path(env, md_object_next(&obj->mot_obj), fp->gf_path,
+                    fp->gf_pathlen, &fp->gf_recno, &fp->gf_linkno);
+       mdt_object_put(env, obj);
+
+       RETURN(rc);
+}
+
+static int mdt_get_info(struct mdt_thread_info *info)
+{
+       struct ptlrpc_request *req = mdt_info_req(info);
+       char *key;
+       int keylen;
+       __u32 *vallen;
+       void *valout;
+       int rc;
+       ENTRY;
+
+       key = req_capsule_client_get(info->mti_pill, &RMF_GETINFO_KEY);
+       if (key == NULL) {
+               CDEBUG(D_IOCTL, "No GETINFO key");
+               RETURN(-EFAULT);
+       }
+       keylen = req_capsule_get_size(info->mti_pill, &RMF_GETINFO_KEY,
+                                     RCL_CLIENT);
+
+       vallen = req_capsule_client_get(info->mti_pill, &RMF_GETINFO_VALLEN);
+       if (vallen == NULL) {
+               CDEBUG(D_IOCTL, "Unable to get RMF_GETINFO_VALLEN buffer");
+               RETURN(-EFAULT);
+       }
+
+       req_capsule_set_size(info->mti_pill, &RMF_GETINFO_VAL, RCL_SERVER,
+                            *vallen);
+       rc = req_capsule_server_pack(info->mti_pill);
+       valout = req_capsule_server_get(info->mti_pill, &RMF_GETINFO_VAL);
+       if (valout == NULL) {
+               CDEBUG(D_IOCTL, "Unable to get get-info RPC out buffer");
+               RETURN(-EFAULT);
+       }
+
+       if (KEY_IS(KEY_FID2PATH))
+               rc = mdt_rpc_fid2path(info, key, valout, *vallen);
+       else
+               rc = -EINVAL;
+
+       lustre_msg_set_status(req->rq_repmsg, rc);
+
+       RETURN(rc);
+}
+
+/* Pass the ioc down */
+static int mdt_ioc_child(struct lu_env *env, struct mdt_device *mdt,
+                         unsigned int cmd, int len, void *data)
+{
+       struct lu_context ioctl_session;
+       struct md_device *next = mdt->mdt_child;
+       int rc;
+       ENTRY;
+
+       rc = lu_context_init(&ioctl_session, LCT_SESSION);
+       if (rc)
+               RETURN(rc);
+       ioctl_session.lc_thread = (struct ptlrpc_thread *)cfs_current();
+       lu_context_enter(&ioctl_session);
+       env->le_ses = &ioctl_session;
+
+       LASSERT(next->md_ops->mdo_iocontrol);
+       rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
+
+       lu_context_exit(&ioctl_session);
+       lu_context_fini(&ioctl_session);
+       RETURN(rc);
+}
+
+static int mdt_ioc_version_get(struct mdt_thread_info *mti, void *karg)
+{
+       struct obd_ioctl_data *data = karg;
+       struct lu_fid *fid = (struct lu_fid *)data->ioc_inlbuf1;
+       __u64 version;
+       struct mdt_object *obj;
+       struct mdt_lock_handle *lh;
+       int rc;
+       ENTRY;
+       CDEBUG(D_IOCTL, "getting version for "DFID"\n", PFID(fid));
+       if (!fid_is_sane(fid))
+               RETURN(-EINVAL);
+
+       lh = &mti->mti_lh[MDT_LH_PARENT];
+       mdt_lock_reg_init(lh, LCK_CR);
+
+       obj = mdt_object_find_lock(mti, fid, lh, MDS_INODELOCK_UPDATE);
+       if (IS_ERR(obj))
+               RETURN(PTR_ERR(obj));
+
+       rc = mdt_object_exists(obj);
+       if (rc < 0) {
+               rc = -EREMOTE;
+               /**
+                * before calling version get the correct MDS should be
+                * fid, this is error to find remote object here
+                */
+               CERROR("nonlocal object "DFID"\n", PFID(fid));
+       } else {
+               version = mo_version_get(mti->mti_env, mdt_object_child(obj));
+               *(__u64 *)data->ioc_inlbuf2 = version;
+               rc = 0;
+       }
+       mdt_object_unlock_put(mti, obj, lh, 1);
+       RETURN(rc);
+}
+
+/* ioctls on obd dev */
 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                          void *karg, void *uarg)
 {
        struct lu_env      env;
-       struct obd_device *obd= exp->exp_obd;
+       struct obd_device *obd = exp->exp_obd;
        struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
        struct dt_device  *dt = mdt->mdt_bottom;
        int rc;

        ENTRY;
        CDEBUG(D_IOCTL, "handling ioctl cmd %#x\n", cmd);
-       rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
+       rc = lu_env_init(&env, LCT_MD_THREAD);
        if (rc)
                RETURN(rc);

        switch (cmd) {
        case OBD_IOC_SYNC:
-               rc = dt->dd_ops->dt_sync(&env, dt);
+               rc = mdt_device_sync(&env, mdt);
                break;
        case OBD_IOC_SET_READONLY:
-               rc = dt->dd_ops->dt_sync(&env, dt);
                dt->dd_ops->dt_ro(&env, dt);
                break;
        case OBD_IOC_ABORT_RECOVERY:
@@ -4448,6 +5502,22 @@ static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                target_stop_recovery_thread(obd);
                rc = 0;
                break;
+       case OBD_IOC_CHANGELOG_REG:
+       case OBD_IOC_CHANGELOG_DEREG:
+       case OBD_IOC_CHANGELOG_CLEAR:
+               rc = mdt_ioc_child(&env, mdt, cmd, len, karg);
+               break;
+       case OBD_IOC_GET_OBJ_VERSION: {
+               struct mdt_thread_info *mti;
+               mti = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
+               memset(mti, 0, sizeof *mti);
+               mti->mti_env = &env;
+               mti->mti_mdt = mdt;
+               mti->mti_exp = exp;
+
+               rc = mdt_ioc_version_get(mti, karg);
+               break;
+       }
        default:
                CERROR("Not supported cmd = %d for device %s\n",
                       cmd, obd->obd_name);
@@ -4461,15 +5531,18 @@ static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt)
 {
        struct lu_device *ld = md2lu_dev(mdt->mdt_child);
-       struct obd_device *obd = mdt->mdt_md_dev.md_lu_dev.ld_obd;
-       int rc, lost;
+#ifdef HAVE_QUOTA_SUPPORT
+       struct obd_device *obd = mdt2obd_dev(mdt);
+       struct md_device *next = mdt->mdt_child;
+#endif
+       int rc;
        ENTRY;

-       /* if some clients didn't participate in recovery then we can possibly
-        * lost sequence. Now we should increase sequence for safe value */
-       lost = obd->obd_max_recoverable_clients - obd->obd_connected_clients;
-       mdt_seq_adjust(env, mdt, lost);
-
+       rc = ld->ld_ops->ldo_recovery_complete(env, ld);
+#ifdef HAVE_QUOTA_SUPPORT
+       if (likely(obd->obd_stopping == 0))
+               next->md_ops->mdo_quota.mqo_recovery(env, next);
+#endif
        RETURN(rc);
 }

@@ -4478,7 +5551,7 @@ int mdt_obd_postrecov(struct obd_device *obd)
        struct lu_env env;
        int rc;

-       rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
+       rc = lu_env_init(&env, LCT_MD_THREAD);
        if (rc)
                RETURN(rc);
        rc = mdt_postrecov(&env, mdt_dev(obd->obd_lu_dev));
@@ -4486,8 +5559,66 @@ int mdt_obd_postrecov(struct obd_device *obd)
        return rc;
 }

+/**
+ * Send a copytool req to a client
+ * Note this sends a request RPC from a server (MDT) to a client (MDC),
+ * backwards of normal comms.
+ */
+int mdt_hsm_copytool_send(struct obd_export *exp)
+{
+       struct kuc_hdr *lh;
+       struct hsm_action_list *hal;
+       struct hsm_action_item *hai;
+       int rc, len;
+       ENTRY;
+
+       CWARN("%s: writing to mdc at %s\n", exp->exp_obd->obd_name,
+             libcfs_nid2str(exp->exp_connection->c_peer.nid));
+
+       len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
+               /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
+       OBD_ALLOC(lh, len);
+       if (lh == NULL)
+               RETURN(-ENOMEM);
+
+       lh->kuc_magic = KUC_MAGIC;
+       lh->kuc_transport = KUC_TRANSPORT_HSM;
+       lh->kuc_msgtype = HMT_ACTION_LIST;
+       lh->kuc_msglen = len;
+
+       hal = (struct hsm_action_list *)(lh + 1);
+       hal->hal_version = HAL_VERSION;
+       hal->hal_archive_num = 1;
+       obd_uuid2fsname(hal->hal_fsname, exp->exp_obd->obd_name,
+                       MTI_NAME_MAXLEN);
+
+       /* mock up an action list */
+       hal->hal_count = 2;
+       hai = hai_zero(hal);
+       hai->hai_action = HSMA_ARCHIVE;
+       hai->hai_fid.f_oid = 0xA00A;
+       hai->hai_len = sizeof(*hai);
+       hai = hai_next(hai);
+       hai->hai_action = HSMA_RESTORE;
+       hai->hai_fid.f_oid = 0xB00B;
+       hai->hai_len = sizeof(*hai);
+
+       /* Uses the ldlm reverse import; this rpc will be seen by
+          the ldlm_callback_handler */
+       rc = do_set_info_async(exp->exp_imp_reverse,
+                              LDLM_SET_INFO, LUSTRE_OBD_VERSION,
+                              sizeof(KEY_HSM_COPYTOOL_SEND),
+                              KEY_HSM_COPYTOOL_SEND,
+                              len, lh, NULL);
+
+       OBD_FREE(lh, len);
+
+       RETURN(rc);
+}
+
 static struct obd_ops mdt_obd_device_ops = {
        .o_owner          = THIS_MODULE,
+       .o_set_info_async = mdt_obd_set_info_async,
        .o_connect        = mdt_obd_connect,
        .o_reconnect      = mdt_obd_reconnect,
        .o_disconnect     = mdt_obd_disconnect,
@@ -4508,11 +5639,15 @@ static struct lu_device* mdt_device_fini(const struct lu_env *env,
        RETURN(NULL);
 }

-static void mdt_device_free(const struct lu_env *env, struct lu_device *d)
+static struct lu_device *mdt_device_free(const struct lu_env *env,
+                                         struct lu_device *d)
 {
        struct mdt_device *m = mdt_dev(d);
+       ENTRY;
+       md_device_fini(&m->mdt_md_dev);
        OBD_FREE_PTR(m);
+       RETURN(NULL);
 }

 static struct lu_device *mdt_device_alloc(const struct lu_env *env,
@@ -4529,7 +5664,7 @@ static struct lu_device *mdt_device_alloc(const struct lu_env *env,
                l = &m->mdt_md_dev.md_lu_dev;
                rc = mdt_init0(env, m, t, cfg);
                if (rc != 0) {
-                       OBD_FREE_PTR(m);
+                       mdt_device_free(env, l);
                        l = ERR_PTR(rc);
                        return l;
                }
@@ -4539,17 +5674,13 @@ static struct lu_device *mdt_device_alloc(const struct lu_env *env,
        return l;
 }

-/*
- * context key constructor/destructor
- */
+/* context key constructor/destructor: mdt_key_init, mdt_key_fini */
 LU_KEY_INIT_FINI(mdt, struct mdt_thread_info);

-struct lu_context_key mdt_thread_key = {
-       .lct_tags = LCT_MD_THREAD,
-       .lct_init = mdt_key_init,
-       .lct_fini = mdt_key_fini
-};
+/* context key: mdt_thread_key */
+LU_CONTEXT_KEY_DEFINE(mdt, LCT_MD_THREAD);

+/* context key constructor/destructor: mdt_txn_key_init, mdt_txn_key_fini */
 LU_KEY_INIT_FINI(mdt_txn, struct mdt_txn_info);

 struct lu_context_key mdt_txn_key = {
@@ -4563,29 +5694,52 @@ struct md_ucred *mdt_ucred(const struct mdt_thread_info *info)
        return md_ucred(info->mti_env);
 }

-static int mdt_type_init(struct lu_device_type *t)
+/**
+ * Enable/disable COS (Commit On Sharing).
+ *
+ * Set/Clear the COS flag in mdt options.
+ *
+ * \param mdt mdt device
+ * \param val 0 disables COS, other values enable COS
+ */
+void mdt_enable_cos(struct mdt_device *mdt, int val)
 {
+       struct lu_env env;
        int rc;

-       LU_CONTEXT_KEY_INIT(&mdt_thread_key);
-       rc = lu_context_key_register(&mdt_thread_key);
-       if (rc == 0) {
-               LU_CONTEXT_KEY_INIT(&mdt_txn_key);
-               rc = lu_context_key_register(&mdt_txn_key);
+       mdt->mdt_opts.mo_cos = !!val;
+       rc = lu_env_init(&env, LCT_MD_THREAD);
+       if (unlikely(rc != 0)) {
+               CWARN("lu_env initialization failed with rc = %d,"
+                     "cannot sync\n", rc);
+               return;
        }
-       return rc;
+       mdt_device_sync(&env, mdt);
+       lu_env_fini(&env);
 }

-static void mdt_type_fini(struct lu_device_type *t)
+/**
+ * Check COS (Commit On Sharing) status.
+ *
+ * Return COS flag status.
+ *
+ * \param mdt mdt device
+ */
+int mdt_cos_is_enabled(struct mdt_device *mdt)
 {
-       lu_context_key_degister(&mdt_thread_key);
-       lu_context_key_degister(&mdt_txn_key);
+       return mdt->mdt_opts.mo_cos != 0;
 }

+/* type constructor/destructor: mdt_type_init, mdt_type_fini */
+LU_TYPE_INIT_FINI(mdt, &mdt_thread_key, &mdt_txn_key);
+
 static struct lu_device_type_operations mdt_device_type_ops = {
        .ldto_init = mdt_type_init,
        .ldto_fini = mdt_type_fini,
+       .ldto_start = mdt_type_start,
+       .ldto_stop  = mdt_type_stop,
+
        .ldto_device_alloc = mdt_device_alloc,
        .ldto_device_free  = mdt_device_free,
        .ldto_device_fini  = mdt_device_fini
@@ -4598,13 +5752,33 @@ static struct lu_device_type mdt_device_type = {
        .ldt_ctx_tags = LCT_MD_THREAD
 };

+static struct lu_local_obj_desc mdt_last_recv = {
+       .llod_name     = LAST_RCVD,
+       .llod_oid      = MDT_LAST_RECV_OID,
+       .llod_is_index = 0,
+};
+
 static int __init mdt_mod_init(void)
 {
        struct lprocfs_static_vars lvars;
        int rc;

-       mdt_num_threads = MDT_NUM_THREADS;
-       lprocfs_init_vars(mdt, &lvars);
+       llo_local_obj_register(&mdt_last_recv);
+
+       if (mdt_num_threads > 0) {
+               if (mdt_num_threads > MDT_MAX_THREADS)
+                       mdt_num_threads = MDT_MAX_THREADS;
+               if (mdt_num_threads < MDT_MIN_THREADS)
+                       mdt_num_threads = MDT_MIN_THREADS;
+               mdt_max_threads = mdt_min_threads = mdt_num_threads;
+       } else {
+               mdt_max_threads = MDT_MAX_THREADS;
+               mdt_min_threads = MDT_MIN_THREADS;
+               if (mdt_min_threads < MDT_NUM_THREADS)
+                       mdt_min_threads = MDT_NUM_THREADS;
+       }
+
+       lprocfs_mdt_init_vars(&lvars);
        rc = class_register_type(&mdt_obd_device_ops, NULL,
                                 lvars.module_vars, LUSTRE_MDT_NAME,
                                 &mdt_device_type);
@@ -4614,6 +5788,7 @@ static int __init mdt_mod_init(void)

 static void __exit mdt_mod_exit(void)
 {
+       llo_local_obj_unregister(&mdt_last_recv);
        class_unregister_type(LUSTRE_MDT_NAME);
 }

@@ -4656,11 +5831,12 @@ static void __exit mdt_mod_exit(void)
 static struct mdt_handler mdt_mds_ops[] = {
 DEF_MDT_HNDL_F(0,                         CONNECT,      mdt_connect),
 DEF_MDT_HNDL_F(0,                         DISCONNECT,   mdt_disconnect),
-DEF_MDT_HNDL_F(0,                         SET_INFO,     mdt_set_info),
+DEF_MDT_HNDL  (0,                         SET_INFO,     mdt_set_info,
+                                                        &RQF_OBD_SET_INFO),
+DEF_MDT_HNDL_F(0,                         GET_INFO,     mdt_get_info),
 DEF_MDT_HNDL_F(0           |HABEO_REFERO, GETSTATUS,    mdt_getstatus),
 DEF_MDT_HNDL_F(HABEO_CORPUS,              GETATTR,      mdt_getattr),
 DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, GETATTR_NAME, mdt_getattr_name),
-DEF_MDT_HNDL_F(HABEO_CORPUS|MUTABOR,      SETXATTR,     mdt_setxattr),
 DEF_MDT_HNDL_F(HABEO_CORPUS,              GETXATTR,     mdt_getxattr),
 DEF_MDT_HNDL_F(0           |HABEO_REFERO, STATFS,       mdt_statfs),
 DEF_MDT_HNDL_F(0           |MUTABOR,      REINT,        mdt_reint),
@@ -4669,8 +5845,10 @@ DEF_MDT_HNDL_F(HABEO_CORPUS,              DONE_WRITING, mdt_done_writing),
 DEF_MDT_HNDL_F(0           |HABEO_REFERO, PIN,          mdt_pin),
 DEF_MDT_HNDL_0(0,                         SYNC,         mdt_sync),
 DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, IS_SUBDIR,    mdt_is_subdir),
-DEF_MDT_HNDL_0(0,                         QUOTACHECK,   mdt_quotacheck_handle),
-DEF_MDT_HNDL_0(0,                         QUOTACTL,     mdt_quotactl_handle)
+#ifdef HAVE_QUOTA_SUPPORT
+DEF_MDT_HNDL_F(0,                         QUOTACHECK,   mdt_quotacheck_handle),
+DEF_MDT_HNDL_F(0,                         QUOTACTL,     mdt_quotactl_handle)
+#endif
 };

 #define DEF_OBD_HNDL(flags, name, fn) \
@@ -4695,7 +5873,19 @@ static struct mdt_handler mdt_dlm_ops[] = {
 DEF_DLM_HNDL_0(0, CP_CALLBACK, mdt_cp_callback)
 };

+#define DEF_LLOG_HNDL(flags, name, fn) \
+       DEF_HNDL(LLOG, ORIGIN_HANDLE_CREATE, _NET, flags, name, fn, NULL)
+
 static struct mdt_handler mdt_llog_ops[] = {
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_CREATE,      mdt_llog_create),
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_NEXT_BLOCK,  mdt_llog_next_block),
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_READ_HEADER, mdt_llog_read_header),
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_WRITE_REC,   NULL),
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_CLOSE,       NULL),
+       DEF_LLOG_HNDL(0, ORIGIN_CONNECT,            NULL),
+       DEF_LLOG_HNDL(0, CATINFO,                   NULL),
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_PREV_BLOCK,  mdt_llog_prev_block),
+       DEF_LLOG_HNDL(0, ORIGIN_HANDLE_DESTROY,     mdt_llog_destroy),
 };

 #define DEF_SEC_CTX_HNDL(name, fn) \
@@ -4783,6 +5973,11 @@ static struct mdt_opc_slice mdt_xmds_handlers[] = {
                .mos_hs        = mdt_obd_ops
        },
        {
+               .mos_opc_start = SEC_CTX_INIT,
+               .mos_opc_end   = SEC_LAST_OPC,
+               .mos_hs        = mdt_sec_ctx_ops
+       },
+       {
                .mos_hs        = NULL
        }
 };
@@ -4817,7 +6012,7 @@ static struct mdt_opc_slice mdt_fld_handlers[] = {
        }
 };

-MODULE_AUTHOR("Cluster File Systems, Inc. ");
+MODULE_AUTHOR("Sun Microsystems, Inc. ");
 MODULE_DESCRIPTION("Lustre Meta-data Target ("LUSTRE_MDT_NAME")");
 MODULE_LICENSE("GPL");