/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * lustre/mdt/mdt_handler.c
- * Lustre Metadata Target (mdt) request handler
+ * GPL HEADER START
*
- * Copyright (c) 2006 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Andreas Dilger <adilger@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Mike Shaver <shaver@clusterfs.com>
- * Author: Nikita Danilov <nikita@clusterfs.com>
- * Author: Huang Hua <huanghua@clusterfs.com>
- * Author: Yury Umanets <umka@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/mdt/mdt_handler.c
+ *
+ * Lustre Metadata Target (mdt) request handler
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Mike Shaver <shaver@clusterfs.com>
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ * Author: Huang Hua <huanghua@clusterfs.com>
+ * Author: Yury Umanets <umka@clusterfs.com>
*/
#ifndef EXPORT_SYMTAB
#include <lustre_mds.h>
#include <lustre_mdt.h>
#include "mdt_internal.h"
-#include <linux/lustre_acl.h>
+#include <lustre_acl.h>
#include <lustre_param.h>
mdl_mode_t mdt_mdl_lock_modes[] = {
static int mdt_statfs(struct mdt_thread_info *info)
{
- struct md_device *next = info->mti_mdt->mdt_child;
- struct obd_statfs *osfs;
- int rc;
+ struct md_device *next = info->mti_mdt->mdt_child;
+ struct ptlrpc_service *svc;
+ struct obd_statfs *osfs;
+ int rc;
ENTRY;
+ svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
+
/* This will trigger a watchdog timeout */
OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
- (MDT_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);
+ (MDT_SERVICE_WATCHDOG_FACTOR *
+ at_get(&svc->srv_at_estimate) / 1000) + 1);
rc = mdt_check_ucred(info);
if (rc)
struct md_object *next = mdt_object_child(parent);
struct lu_fid *child_fid = &info->mti_tmp_fid1;
struct lu_name *lname = NULL;
- const char *name;
+ const char *name = NULL;
int namelen = 0;
struct mdt_lock_handle *lhp;
struct ldlm_lock *lock;
namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
RCL_CLIENT) - 1;
- LASSERT(namelen >= 0);
-
- /* XXX: "namelen == 0" is for getattr by fid (OBD_CONNECT_ATTRFID),
- * otherwise do not allow empty name, that is the name must contain
- * at least one character and the terminating '\0'*/
- if (namelen == 0) {
- reqbody =req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
- LASSERT(fid_is_sane(&reqbody->fid2));
- name = NULL;
-
- CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
- "ldlm_rep = %p\n",
- PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
- ldlm_rep);
- } else {
- lname = mdt_name(info->mti_env, (char *)name, namelen);
- CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
- "ldlm_rep = %p\n",
- PFID(mdt_object_fid(parent)), name, ldlm_rep);
- }
+ if (!info->mti_cross_ref) {
+ /*
+ * XXX: Check for "namelen == 0" is for getattr by fid
+ * (OBD_CONNECT_ATTRFID), otherwise do not allow empty name,
+ * that is the name must contain at least one character and
+ * the terminating '\0'
+ */
+ if (namelen == 0) {
+ reqbody = req_capsule_client_get(info->mti_pill,
+ &RMF_MDT_BODY);
+ LASSERT(fid_is_sane(&reqbody->fid2));
+ name = NULL;
+ CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
+ "ldlm_rep = %p\n",
+ PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
+ ldlm_rep);
+ } else {
+ lname = mdt_name(info->mti_env, (char *)name, namelen);
+ CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
+ "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
+ name, ldlm_rep);
+ }
+ }
mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
rc = mdt_object_exists(parent);
&parent->mot_obj.mo_lu,
"Parent doesn't exist!\n");
RETURN(-ESTALE);
- } else
+ } else if (!info->mti_cross_ref) {
LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
PFID(mdt_object_fid(parent)));
-
+ }
if (lname) {
rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
if (rc != 0) {
LDLM_LOCK_PUT(lock);
rc = 0;
} else {
- struct md_attr *ma = &info->mti_attr;
+ struct md_attr *ma;
relock:
+ ma = &info->mti_attr;
+
mdt_lock_handle_init(lhc);
mdt_lock_reg_init(lhc, LCK_PR);
LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
&child->mot_obj.mo_lu,
"Object doesn't exist!\n");
+ GOTO(out_child, rc = -ESTALE);
}
ma->ma_valid = 0;
lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
if (lock) {
struct mdt_body *repbody;
- struct lu_attr *ma;
/* Debugging code. */
res_id = &lock->l_resource->lr_name;
*/
repbody = req_capsule_server_get(info->mti_pill,
&RMF_MDT_BODY);
- ma = &info->mti_attr.ma_attr;
if (lock->l_policy_data.l_inodebits.bits &
MDS_INODELOCK_UPDATE)
mdt_pack_size2body(info, child);
RETURN(-EFAULT);
}
- if (keylen != (sizeof(KEY_READ_ONLY) - 1) ||
- memcmp(key, KEY_READ_ONLY, keylen) != 0)
+ if (!KEY_IS(KEY_READ_ONLY))
RETURN(-EINVAL);
req->rq_status = 0;
struct l_wait_info *lwi = &info->mti_u.rdpg.mti_wait_info;
int tmpcount;
int tmpsize;
+ int timeout;
int i;
int rc;
ENTRY;
GOTO(free_desc, rc);
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
- GOTO(abort_bulk, rc);
+ GOTO(abort_bulk, rc = 0);
- *lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+ timeout = (int) req->rq_deadline - cfs_time_current_sec();
+ if (timeout < 0)
+ CERROR("Req deadline already passed %lu (now: %lu)\n",
+ req->rq_deadline, cfs_time_current_sec());
+ *lwi = LWI_TIMEOUT(max(timeout, 1) * HZ, NULL, NULL);
rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), lwi);
LASSERT (rc == 0 || rc == -ETIMEDOUT);
ma->ma_attr.la_valid = LA_MODE;
ma->ma_valid = MA_INODE;
- kmap(page);
+ cfs_kmap(page);
dp = page_address(page);
offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
continue;
fid_le_to_cpu(lf, &ent->lde_fid);
- if (le32_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
+ if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
ma->ma_attr.la_mode = S_IFDIR;
else
ma->ma_attr.la_mode = 0;
memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
lname = mdt_name(info->mti_env, name,
- le16_to_cpu(ent->lde_namelen) + 1);
+ le16_to_cpu(ent->lde_namelen));
ma->ma_attr_flags |= MDS_PERM_BYPASS;
rc = mdo_name_insert(info->mti_env,
md_object_next(&object->mot_obj),
}
EXIT;
out:
- kunmap(page);
+ cfs_kunmap(page);
return rc;
}
* reqbody->nlink contains number bytes to read.
*/
rdpg->rp_hash = reqbody->size;
- if ((__u64)rdpg->rp_hash != reqbody->size) {
- CERROR("Invalid hash: %#llx != %#llx\n",
- (__u64)rdpg->rp_hash, reqbody->size);
+ if (rdpg->rp_hash != reqbody->size) {
+ CERROR("Invalid hash: "LPX64" != "LPX64"\n",
+ rdpg->rp_hash, reqbody->size);
RETURN(-EFAULT);
}
rdpg->rp_count = reqbody->nlink;
ENTRY;
- if (OBD_FAIL_CHECK_RESET(OBD_FAIL_MDS_REINT_NET,
- OBD_FAIL_MDS_REINT_NET)) {
- info->mti_fail_id = OBD_FAIL_MDS_REINT_NET;
- RETURN(0);
- }
-
opc = mdt_reint_opcode(info, reint_fmts);
if (opc >= 0) {
/*
RETURN(rc);
}
-/* TODO these two methods not available now. */
-
/* this should sync the whole device */
-static int mdt_device_sync(struct mdt_thread_info *info)
+static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
{
- return 0;
+ struct dt_device *dt = mdt->mdt_bottom;
+ int rc;
+ ENTRY;
+
+ rc = dt->dd_ops->dt_sync(env, dt);
+ RETURN(rc);
}
/* this should sync this object */
static int mdt_object_sync(struct mdt_thread_info *info)
{
- return 0;
+ struct md_object *next;
+ int rc;
+ ENTRY;
+
+ if (!mdt_object_exists(info->mti_object)) {
+ CWARN("Non existing object "DFID"!\n",
+ PFID(mdt_object_fid(info->mti_object)));
+ RETURN(-ESTALE);
+ }
+ next = mdt_object_child(info->mti_object);
+ rc = mo_object_sync(info->mti_env, next);
+
+ RETURN(rc);
}
static int mdt_sync(struct mdt_thread_info *info)
/* sync the whole device */
rc = req_capsule_server_pack(pill);
if (rc == 0)
- rc = mdt_device_sync(info);
+ rc = mdt_device_sync(info->mti_env, info->mti_mdt);
else
rc = err_serious(rc);
} else {
*/
LASSERT(info->mti_dlm_req != NULL);
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE)) {
- info->mti_fail_id = OBD_FAIL_LDLM_ENQUEUE;
- return 0;
- }
-
req = mdt_info_req(info);
/*
sptlrpc_svc_ctx_invalidate(req);
}
+ OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
+
return rc;
}
ENTRY;
CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
- o = lu_object_find(env, d->mdt_md_dev.md_lu_dev.ld_site, f);
+ o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
if (unlikely(IS_ERR(o)))
m = (struct mdt_object *)o;
else
LASSERT(lh->mlh_type != MDT_PDO_LOCK);
}
+ if (lh->mlh_type == MDT_PDO_LOCK) {
+ /* check for exists after object is locked */
+ if (mdt_object_exists(o) == 0) {
+ /* Non-existent object shouldn't have PDO lock */
+ RETURN(-ESTALE);
+ } else {
+ /* Non-dir object shouldn't have PDO lock */
+ LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
+ }
+ }
+
memset(policy, 0, sizeof(*policy));
fid_build_reg_res_name(mdt_object_fid(o), res_id);
}
/*
- * Finish res_id initializing by name hash marking patr of
+ * Finish res_id initializing by name hash marking part of
* directory which is taking modification.
*/
res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
/*
* Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
* going to be sent to client. If it is - mdt_intent_policy() path will
- * fix it up and turns FL_LOCAL flag off.
+ * fix it up and turn FL_LOCAL flag off.
*/
rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB);
if (rc)
GOTO(out, rc);
- if (lh->mlh_type == MDT_PDO_LOCK) {
- /* check for exists after object is locked */
- if (mdt_object_exists(o) == 0) {
- /* Non-existent object shouldn't have PDO lock */
- rc = -ESTALE;
- } else {
- /* Non-dir object shouldn't have PDO lock */
- LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
- }
- }
out:
if (rc)
mdt_object_unlock(info, o, lh, 1);
RETURN(rc);
}
+static inline
+void mdt_save_lock(struct ptlrpc_request *req, struct lustre_handle *h,
+ ldlm_mode_t mode, int decref)
+{
+ ENTRY;
+
+ if (lustre_handle_is_used(h)) {
+ if (decref)
+ mdt_fid_unlock(h, mode);
+ else
+ ptlrpc_save_lock(req, h, mode);
+ h->cookie = 0ull;
+ }
+
+ EXIT;
+}
+
/*
* Just call ldlm_lock_decref() if decref, else we only call ptlrpc_save_lock()
* to save this lock in req. when transaction committed, req will be released,
struct ptlrpc_request *req = mdt_info_req(info);
ENTRY;
- if (lustre_handle_is_used(&lh->mlh_pdo_lh)) {
- /* Do not save PDO locks to request, just decref. */
- mdt_fid_unlock(&lh->mlh_pdo_lh,
- lh->mlh_pdo_mode);
- lh->mlh_pdo_lh.cookie = 0ull;
- }
-
- if (lustre_handle_is_used(&lh->mlh_reg_lh)) {
- if (decref) {
- mdt_fid_unlock(&lh->mlh_reg_lh,
- lh->mlh_reg_mode);
- } else {
- ptlrpc_save_lock(req, &lh->mlh_reg_lh,
- lh->mlh_reg_mode);
- }
- lh->mlh_reg_lh.cookie = 0ull;
- }
+ mdt_save_lock(req, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
+ mdt_save_lock(req, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
EXIT;
}
LASSERT(current->journal_info == NULL);
/*
- * Mask out OBD_FAIL_ONCE, because that will stop
- * correct handling of failed req later in ldlm due to doing
- * obd_fail_loc |= OBD_FAIL_ONCE without actually
- * correct actions like it is done in target_send_reply_msg().
+ * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
+ * to put same checks into handlers like mdt_close(), mdt_reint(),
+ * etc., without talking to mdt authors first. Checking same thing
+ * there again is useless and returning 0 error without packing reply
+ * is buggy! Handlers either pack reply or return error.
+ *
+ * We return 0 here and do not send any reply in order to emulate
+ * network failure. Do not send any reply in case any of NET related
+ * fail_id has occurred.
*/
- if (h->mh_fail_id != 0) {
- /*
- * Set to info->mti_fail_id to handler fail_id, it will be used
- * later, and better than use default fail_id.
- */
- if (OBD_FAIL_CHECK_RESET(h->mh_fail_id && OBD_FAIL_MASK_LOC,
- h->mh_fail_id & ~OBD_FAILED)) {
- info->mti_fail_id = h->mh_fail_id;
- RETURN(0);
- }
- }
+ if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
+ RETURN(0);
rc = 0;
flags = h->mh_flags;
* only
*/
rc = h->mh_act(info);
+ if (rc == 0 &&
+ !req->rq_no_reply && req->rq_reply_state == NULL) {
+ DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
+ "pack reply and returned 0 error\n",
+ h->mh_name);
+ LBUG();
+ }
serious = is_serious(rc);
rc = clear_serious(rc);
} else
LBUG();
}
- RETURN(rc);
+ target_send_reply(req, rc, info->mti_fail_id);
+ RETURN(0);
}
void mdt_lock_handle_init(struct mdt_lock_handle *lh)
info->mti_env = NULL;
}
-/* mds/handler.c */
-extern int mds_filter_recovery_request(struct ptlrpc_request *req,
- struct obd_device *obd, int *process);
+static int mdt_filter_recovery_request(struct ptlrpc_request *req,
+ struct obd_device *obd, int *process)
+{
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case MDS_CONNECT: /* This will never get here, but for completeness. */
+ case OST_CONNECT: /* This will never get here, but for completeness. */
+ case MDS_DISCONNECT:
+ case OST_DISCONNECT:
+ *process = 1;
+ RETURN(0);
+
+ case MDS_CLOSE:
+ case MDS_DONE_WRITING:
+ case MDS_SYNC: /* used in unmounting */
+ case OBD_PING:
+ case MDS_REINT:
+ case SEQ_QUERY:
+ case FLD_QUERY:
+ case LDLM_ENQUEUE:
+ *process = target_queue_recovery_request(req, obd);
+ RETURN(0);
+
+ default:
+ DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
+ *process = -EAGAIN;
+ RETURN(0);
+ }
+}
+
/*
* Handle recovery. Return:
* +1: continue request processing;
int rc;
int should_process;
DEBUG_REQ(D_INFO, req, "Got new replay");
- rc = mds_filter_recovery_request(req, obd, &should_process);
+ rc = mdt_filter_recovery_request(req, obd, &should_process);
if (rc != 0 || !should_process)
RETURN(rc);
else if (should_process < 0) {
RETURN(+1);
}
-static int mdt_reply(struct ptlrpc_request *req, int rc,
- struct mdt_thread_info *info)
+static int mdt_msg_check_version(struct lustre_msg *msg)
{
- ENTRY;
+ int rc;
-#if 0
- if (req->rq_reply_state == NULL && rc == 0) {
- req->rq_status = rc;
- lustre_pack_reply(req, 1, NULL, NULL);
+ switch (lustre_msg_get_opc(msg)) {
+ case MDS_CONNECT:
+ case MDS_DISCONNECT:
+ case OBD_PING:
+ case SEC_CTX_INIT:
+ case SEC_CTX_INIT_CONT:
+ case SEC_CTX_FINI:
+ rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
+ if (rc)
+ CERROR("bad opc %u version %08x, expecting %08x\n",
+ lustre_msg_get_opc(msg),
+ lustre_msg_get_version(msg),
+ LUSTRE_OBD_VERSION);
+ break;
+ case MDS_GETSTATUS:
+ case MDS_GETATTR:
+ case MDS_GETATTR_NAME:
+ case MDS_STATFS:
+ case MDS_READPAGE:
+ case MDS_WRITEPAGE:
+ case MDS_IS_SUBDIR:
+ case MDS_REINT:
+ case MDS_CLOSE:
+ case MDS_DONE_WRITING:
+ case MDS_PIN:
+ case MDS_SYNC:
+ case MDS_GETXATTR:
+ case MDS_SETXATTR:
+ case MDS_SET_INFO:
+ case MDS_QUOTACHECK:
+ case MDS_QUOTACTL:
+ case QUOTA_DQACQ:
+ case QUOTA_DQREL:
+ case SEQ_QUERY:
+ case FLD_QUERY:
+ rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
+ if (rc)
+ CERROR("bad opc %u version %08x, expecting %08x\n",
+ lustre_msg_get_opc(msg),
+ lustre_msg_get_version(msg),
+ LUSTRE_MDS_VERSION);
+ break;
+ case LDLM_ENQUEUE:
+ case LDLM_CONVERT:
+ case LDLM_BL_CALLBACK:
+ case LDLM_CP_CALLBACK:
+ rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
+ if (rc)
+ CERROR("bad opc %u version %08x, expecting %08x\n",
+ lustre_msg_get_opc(msg),
+ lustre_msg_get_version(msg),
+ LUSTRE_DLM_VERSION);
+ break;
+ case OBD_LOG_CANCEL:
+ case LLOG_ORIGIN_HANDLE_CREATE:
+ case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
+ case LLOG_ORIGIN_HANDLE_READ_HEADER:
+ case LLOG_ORIGIN_HANDLE_CLOSE:
+ case LLOG_ORIGIN_HANDLE_DESTROY:
+ case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
+ case LLOG_CATINFO:
+ rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
+ if (rc)
+ CERROR("bad opc %u version %08x, expecting %08x\n",
+ lustre_msg_get_opc(msg),
+ lustre_msg_get_version(msg),
+ LUSTRE_LOG_VERSION);
+ break;
+ default:
+ CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
+ rc = -ENOTSUPP;
}
-#endif
- target_send_reply(req, rc, info->mti_fail_id);
- RETURN(0);
+ return rc;
}
-/* mds/handler.c */
-extern int mds_msg_check_version(struct lustre_msg *msg);
-
static int mdt_handle0(struct ptlrpc_request *req,
struct mdt_thread_info *info,
struct mdt_opc_slice *supported)
LASSERT(current->journal_info == NULL);
msg = req->rq_reqmsg;
- rc = mds_msg_check_version(msg);
+ rc = mdt_msg_check_version(msg);
if (likely(rc == 0)) {
rc = mdt_recovery(info);
if (likely(rc == +1)) {
supported);
if (likely(h != NULL)) {
rc = mdt_req_handle(info, h, req);
- rc = mdt_reply(req, rc, info);
} else {
CERROR("The unsupported opc: 0x%x\n", lustre_msg_get_opc(msg) );
req->rq_status = -ENOTSUPP;
* lock.
*/
if (new_lock == NULL)
- new_lock = ldlm_handle2lock(&lh->mlh_reg_lh);
+ new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
lh->mlh_reg_lh.cookie = 0;
RETURN(ELDLM_LOCK_REPLACED);
}
- /* This lock might already be given to the client by an resent req,
- * in this case we should return ELDLM_LOCK_ABORTED,
- * so we should check led_held_locks here, but it will affect
- * performance, FIXME
+ /*
+ * Fixup the lock to be given to the client.
*/
- /* Fixup the lock to be given to the client */
lock_res_and_lock(new_lock);
- new_lock->l_readers = 0;
- new_lock->l_writers = 0;
+ /* Zero new_lock->l_readers and new_lock->l_writers without triggering
+ * possible blocking AST. */
+ while (new_lock->l_readers > 0) {
+ lu_ref_del(&new_lock->l_reference, "reader", new_lock);
+ lu_ref_del(&new_lock->l_reference, "user", new_lock);
+ new_lock->l_readers--;
+ }
+ while (new_lock->l_writers > 0) {
+ lu_ref_del(&new_lock->l_reference, "writer", new_lock);
+ lu_ref_del(&new_lock->l_reference, "user", new_lock);
+ new_lock->l_writers--;
+ }
new_lock->l_export = class_export_get(req->rq_export);
- spin_lock(&req->rq_export->exp_ldlm_data.led_lock);
- list_add(&new_lock->l_export_chain,
- &new_lock->l_export->exp_ldlm_data.led_held_locks);
- spin_unlock(&req->rq_export->exp_ldlm_data.led_lock);
-
new_lock->l_blocking_ast = lock->l_blocking_ast;
new_lock->l_completion_ast = lock->l_completion_ast;
new_lock->l_remote_handle = lock->l_remote_handle;
new_lock->l_flags &= ~LDLM_FL_LOCAL;
+ lustre_hash_add(new_lock->l_export->exp_lock_hash,
+ &new_lock->l_remote_handle,
+ &new_lock->l_exp_hash);
+
unlock_res_and_lock(new_lock);
- LDLM_LOCK_PUT(new_lock);
+ LDLM_LOCK_RELEASE(new_lock);
lh->mlh_reg_lh.cookie = 0;
RETURN(ELDLM_LOCK_REPLACED);
struct obd_export *exp = req->rq_export;
struct lustre_handle remote_hdl;
struct ldlm_request *dlmreq;
- struct list_head *iter;
+ struct ldlm_lock *lock;
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
return;
dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
remote_hdl = dlmreq->lock_handle[0];
- spin_lock(&exp->exp_ldlm_data.led_lock);
- list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
- struct ldlm_lock *lock;
- lock = list_entry(iter, struct ldlm_lock, l_export_chain);
- if (lock == new_lock)
- continue;
- if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
+ lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
+ if (lock) {
+ if (lock != new_lock) {
lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
lh->mlh_reg_mode = lock->l_granted_mode;
- LDLM_DEBUG(lock, "restoring lock cookie");
+ LDLM_DEBUG(lock, "Restoring lock cookie");
DEBUG_REQ(D_DLMTRACE, req,
"restoring lock cookie "LPX64,
lh->mlh_reg_lh.cookie);
if (old_lock)
*old_lock = LDLM_LOCK_GET(lock);
- spin_unlock(&exp->exp_ldlm_data.led_lock);
+ lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
return;
}
+
+ lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
}
- spin_unlock(&exp->exp_ldlm_data.led_lock);
/*
* If the xid matches, then we know this is a resent request, and allow
rep->lock_policy_res2 = clear_serious(rc);
lhc->mlh_reg_lh.cookie = 0ull;
- rc = ELDLM_LOCK_ABORTED;
- RETURN(rc);
+ if (rc == -ENOTCONN || rc == -ENODEV) {
+ /*
+ * If it is the disconnect error (ENODEV & ENOCONN), the error
+ * will be returned by rq_status, and client at ptlrpc layer
+ * will detect this, then disconnect, reconnect the import
+ * immediately, instead of impacting the following the rpc.
+ */
+ RETURN(rc);
+ } else {
+ /*
+ * For other cases, the error will be returned by intent.
+ * and client will retrieve the result from intent.
+ */
+ /*
+ * FIXME: when open lock is finished, that should be
+ * checked here.
+ */
+ RETURN(ELDLM_LOCK_ABORTED);
+ }
}
static int mdt_intent_code(long itcode)
if (it != NULL) {
const struct ldlm_request *dlmreq;
__u64 req_bits;
-#if 0
- struct ldlm_lock *lock = *lockp;
-
- LDLM_DEBUG(lock, "intent policy opc: %s\n",
- ldlm_it2str(it->opc));
-#endif
rc = mdt_intent_opc(it->opc, info, lockp, flags);
if (rc == 0)
procfs_entry = m->mdt_md_dev.md_lu_dev.ld_obd->obd_proc_entry;
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_REQUEST_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = MDS_MAXREQSIZE,
+ .psc_max_reply_size = MDS_MAXREPSIZE,
+ .psc_req_portal = MDS_REQUEST_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
/*
* We'd like to have a mechanism to set this on a per-device
* basis, but alas...
*/
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD
+ .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
+ MDT_MAX_THREADS),
+ .psc_max_threads = MDT_MAX_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD
};
m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client;
m->mdt_regular_service =
ptlrpc_init_svc_conf(&conf, mdt_regular_handle, LUSTRE_MDT_NAME,
- procfs_entry, NULL, LUSTRE_MDT_NAME);
+ procfs_entry, target_print_req,
+ LUSTRE_MDT_NAME);
if (m->mdt_regular_service == NULL)
RETURN(-ENOMEM);
* ideally.
*/
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_READPAGE_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = MDS_MAXREQSIZE,
+ .psc_max_reply_size = MDS_MAXREPSIZE,
+ .psc_req_portal = MDS_READPAGE_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
+ MDT_MAX_THREADS),
+ .psc_max_threads = MDT_MAX_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD
};
m->mdt_readpage_service =
ptlrpc_init_svc_conf(&conf, mdt_readpage_handle,
LUSTRE_MDT_NAME "_readpage",
- procfs_entry, NULL, "mdt_rdpg");
+ procfs_entry, target_print_req,"mdt_rdpg");
if (m->mdt_readpage_service == NULL) {
CERROR("failed to start readpage service\n");
* setattr service configuration.
*/
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_SETATTR_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = MDS_MAXREQSIZE,
+ .psc_max_reply_size = MDS_MAXREPSIZE,
+ .psc_req_portal = MDS_SETATTR_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
.psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD
+ MDT_MAX_THREADS),
+ .psc_max_threads = MDT_MAX_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD
};
m->mdt_setattr_service =
ptlrpc_init_svc_conf(&conf, mdt_regular_handle,
LUSTRE_MDT_NAME "_setattr",
- procfs_entry, NULL, "mdt_attr");
+ procfs_entry, target_print_req,"mdt_attr");
if (!m->mdt_setattr_service) {
CERROR("failed to start setattr service\n");
* sequence controller service configuration
*/
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_CONTROLLER_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
- .psc_min_threads = SEQ_NUM_THREADS,
- .psc_max_threads = SEQ_NUM_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = SEQ_MAXREQSIZE,
+ .psc_max_reply_size = SEQ_MAXREPSIZE,
+ .psc_req_portal = SEQ_CONTROLLER_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_min_threads = SEQ_NUM_THREADS,
+ .psc_max_threads = SEQ_NUM_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
m->mdt_mdsc_service =
ptlrpc_init_svc_conf(&conf, mdt_mdsc_handle,
LUSTRE_MDT_NAME"_mdsc",
- procfs_entry, NULL, "mdt_mdsc");
+ procfs_entry, target_print_req,"mdt_mdsc");
if (!m->mdt_mdsc_service) {
CERROR("failed to start seq controller service\n");
GOTO(err_mdt_svc, rc = -ENOMEM);
* metadata sequence server service configuration
*/
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_METADATA_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
- .psc_min_threads = SEQ_NUM_THREADS,
- .psc_max_threads = SEQ_NUM_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = SEQ_MAXREQSIZE,
+ .psc_max_reply_size = SEQ_MAXREPSIZE,
+ .psc_req_portal = SEQ_METADATA_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_min_threads = SEQ_NUM_THREADS,
+ .psc_max_threads = SEQ_NUM_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
m->mdt_mdss_service =
ptlrpc_init_svc_conf(&conf, mdt_mdss_handle,
LUSTRE_MDT_NAME"_mdss",
- procfs_entry, NULL, "mdt_mdss");
+ procfs_entry, target_print_req,"mdt_mdss");
if (!m->mdt_mdss_service) {
CERROR("failed to start metadata seq server service\n");
GOTO(err_mdt_svc, rc = -ENOMEM);
* controller which manages space.
*/
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_DATA_PORTAL,
- .psc_rep_portal = OSC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
- .psc_min_threads = SEQ_NUM_THREADS,
- .psc_max_threads = SEQ_NUM_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = SEQ_MAXREQSIZE,
+ .psc_max_reply_size = SEQ_MAXREPSIZE,
+ .psc_req_portal = SEQ_DATA_PORTAL,
+ .psc_rep_portal = OSC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_min_threads = SEQ_NUM_THREADS,
+ .psc_max_threads = SEQ_NUM_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
m->mdt_dtss_service =
ptlrpc_init_svc_conf(&conf, mdt_dtss_handle,
LUSTRE_MDT_NAME"_dtss",
- procfs_entry, NULL, "mdt_dtss");
+ procfs_entry, target_print_req,"mdt_dtss");
if (!m->mdt_dtss_service) {
CERROR("failed to start data seq server service\n");
GOTO(err_mdt_svc, rc = -ENOMEM);
/* FLD service start */
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = FLD_MAXREQSIZE,
- .psc_max_reply_size = FLD_MAXREPSIZE,
- .psc_req_portal = FLD_REQUEST_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
- .psc_min_threads = FLD_NUM_THREADS,
- .psc_max_threads = FLD_NUM_THREADS,
- .psc_ctx_tags = LCT_DT_THREAD|LCT_MD_THREAD
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = FLD_MAXREQSIZE,
+ .psc_max_reply_size = FLD_MAXREPSIZE,
+ .psc_req_portal = FLD_REQUEST_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_min_threads = FLD_NUM_THREADS,
+ .psc_max_threads = FLD_NUM_THREADS,
+ .psc_ctx_tags = LCT_DT_THREAD|LCT_MD_THREAD
};
m->mdt_fld_service =
ptlrpc_init_svc_conf(&conf, mdt_fld_handle,
LUSTRE_MDT_NAME"_fld",
- procfs_entry, NULL, "mdt_fld");
+ procfs_entry, target_print_req, "mdt_fld");
if (!m->mdt_fld_service) {
CERROR("failed to start fld service\n");
GOTO(err_mdt_svc, rc = -ENOMEM);
* mds-mds requests be not blocked during recovery.
*/
conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_MDS_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = MDS_MAXREQSIZE,
+ .psc_max_reply_size = MDS_MAXREPSIZE,
+ .psc_req_portal = MDS_MDS_PORTAL,
+ .psc_rep_portal = MDC_REPLY_PORTAL,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
+ MDT_MAX_THREADS),
+ .psc_max_threads = MDT_MAX_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD
};
- m->mdt_xmds_service = ptlrpc_init_svc_conf(&conf, mdt_xmds_handle,
- LUSTRE_MDT_NAME "_mds",
- procfs_entry, NULL, "mdt_xmds");
+ m->mdt_xmds_service =
+ ptlrpc_init_svc_conf(&conf, mdt_xmds_handle,
+ LUSTRE_MDT_NAME "_mds",
+ procfs_entry, target_print_req,"mdt_xmds");
if (m->mdt_xmds_service == NULL) {
CERROR("failed to start readpage service\n");
m->mdt_bottom = NULL;
}
-static struct lu_device *mdt_layer_setup(const struct lu_env *env,
+static struct lu_device *mdt_layer_setup(struct lu_env *env,
const char *typename,
struct lu_device *child,
struct lustre_cfg *cfg)
return ERR_PTR(rc);
}
-static int mdt_stack_init(const struct lu_env *env,
+static int mdt_stack_init(struct lu_env *env,
struct mdt_device *m, struct lustre_cfg *cfg)
{
struct lu_device *d = &m->mdt_md_dev.md_lu_dev;
m->mdt_identity_cache = NULL;
if (m->mdt_namespace != NULL) {
- ldlm_namespace_free(m->mdt_namespace, d->ld_obd->obd_force);
+ ldlm_namespace_free(m->mdt_namespace, NULL, d->ld_obd->obd_force);
d->ld_obd->obd_namespace = m->mdt_namespace = NULL;
}
sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
next->md_ops->mdo_init_capa_ctxt(env, next, 0, 0, 0, NULL);
- del_timer(&m->mdt_ck_timer);
+ cfs_timer_disarm(&m->mdt_ck_timer);
mdt_ck_thread_stop(m);
/* finish the stack */
}
/* init the stack */
- rc = mdt_stack_init(env, m, cfg);
+ rc = mdt_stack_init((struct lu_env *)env, m, cfg);
if (rc) {
CERROR("Can't init device stack, rc %d\n", rc);
GOTO(err_fini_proc, rc);
snprintf(info->mti_u.ns_name, sizeof info->mti_u.ns_name,
LUSTRE_MDT_NAME"-%p", m);
- m->mdt_namespace = ldlm_namespace_new(info->mti_u.ns_name,
+ m->mdt_namespace = ldlm_namespace_new(obd, info->mti_u.ns_name,
LDLM_NAMESPACE_SERVER,
LDLM_NAMESPACE_GREEDY);
if (m->mdt_namespace == NULL)
GOTO(err_free_ns, rc);
}
- m->mdt_ck_timer.function = mdt_ck_timer_callback;
- m->mdt_ck_timer.data = (unsigned long)m;
- init_timer(&m->mdt_ck_timer);
+ cfs_timer_init(&m->mdt_ck_timer, mdt_ck_timer_callback, m);
+
rc = mdt_ck_thread_start(m);
if (rc)
GOTO(err_free_ns, rc);
mdt_init_capa_ctxt(env, m);
+ /* Reduce the initial timeout on an MDS because it doesn't need such
+ * a long timeout as an OST does. Adaptive timeouts will adjust this
+ * value appropriately. */
if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
- ldlm_timeout = 6;
+ ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
RETURN(0);
target_recovery_fini(obd);
mdt_fs_cleanup(env, m);
err_capa:
- del_timer(&m->mdt_ck_timer);
+ cfs_timer_disarm(&m->mdt_ck_timer);
mdt_ck_thread_stop(m);
err_free_ns:
upcall_cache_cleanup(m->mdt_identity_cache);
m->mdt_identity_cache = NULL;
- ldlm_namespace_free(m->mdt_namespace, 0);
+ ldlm_namespace_free(m->mdt_namespace, NULL, 0);
obd->obd_namespace = m->mdt_namespace = NULL;
err_fini_seq:
mdt_seq_fini(env, m);
mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
err_fini_proc:
mdt_procfs_fini(m);
+ ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
err_fini_site:
lu_site_fini(s);
struct obd_device *obd = d->ld_obd;
lprocfs_mdt_init_vars(&lvars);
- rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, cfg, obd);
- if (rc)
- /* others are passed further */
+ rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars,
+ cfg, obd);
+ if (rc == -ENOSYS)
+ /* we don't understand; pass it on */
rc = next->ld_ops->ldo_process_config(env, next, cfg);
break;
}
RETURN(NULL);
}
-static int mdt_object_init(const struct lu_env *env, struct lu_object *o)
+static int mdt_object_init(const struct lu_env *env, struct lu_object *o,
+ const struct lu_object_conf *_)
{
struct mdt_device *d = mdt_dev(o->lo_dev);
struct lu_device *under;
void *localdata)
{
struct mdt_thread_info *info;
- struct mdt_client_data *mcd;
+ struct lsd_client_data *lcd;
struct obd_export *exp;
struct mdt_device *mdt;
struct ptlrpc_request *req;
rc = mdt_connect_internal(exp, mdt, data);
if (rc == 0) {
- OBD_ALLOC_PTR(mcd);
- if (mcd != NULL) {
+ OBD_ALLOC_PTR(lcd);
+ if (lcd != NULL) {
struct mdt_thread_info *mti;
mti = lu_context_key_get(&env->le_ctx,
&mdt_thread_key);
LASSERT(mti != NULL);
mti->mti_exp = exp;
- memcpy(mcd->mcd_uuid, cluuid, sizeof mcd->mcd_uuid);
- exp->exp_mdt_data.med_mcd = mcd;
+ memcpy(lcd->lcd_uuid, cluuid, sizeof lcd->lcd_uuid);
+ exp->exp_mdt_data.med_lcd = lcd;
rc = mdt_client_new(env, mdt);
if (rc != 0) {
- OBD_FREE_PTR(mcd);
- exp->exp_mdt_data.med_mcd = NULL;
+ OBD_FREE_PTR(lcd);
+ exp->exp_mdt_data.med_lcd = NULL;
}
} else
rc = -ENOMEM;
if (mdt->mdt_namespace != NULL || exp->exp_obd->obd_namespace != NULL)
ldlm_cancel_locks_for_export(exp);
+ /* release nid stat refererence */
+ lprocfs_exp_cleanup(exp);
+
/* complete all outstanding replies */
spin_lock(&exp->exp_lock);
while (!list_empty(&exp->exp_outstanding_replies)) {
static int mdt_init_export(struct obd_export *exp)
{
struct mdt_export_data *med = &exp->exp_mdt_data;
+ int rc;
ENTRY;
CFS_INIT_LIST_HEAD(&med->med_open_head);
spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
spin_unlock(&exp->exp_lock);
- RETURN(0);
+ rc = ldlm_init_export(exp);
+ if (rc)
+ CERROR("Error %d while initializing export\n", rc);
+ RETURN(rc);
}
static int mdt_destroy_export(struct obd_export *export)
mdt_cleanup_idmap(med);
target_destroy_export(export);
+ ldlm_destroy_export(export);
if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
RETURN(0);
switch (ev) {
case OBD_NOTIFY_CONFIG:
- mdt_allow_cli(mdt_dev(host->obd_lu_dev), (unsigned int)data);
+ mdt_allow_cli(mdt_dev(host->obd_lu_dev), (unsigned long)data);
break;
default:
CDEBUG(D_INFO, "Unhandled notification %#x\n", ev);
switch (cmd) {
case OBD_IOC_SYNC:
- rc = dt->dd_ops->dt_sync(&env, dt);
+ rc = mdt_device_sync(&env, mdt);
break;
case OBD_IOC_SET_READONLY:
rc = dt->dd_ops->dt_sync(&env, dt);
}
};
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Meta-data Target ("LUSTRE_MDT_NAME")");
MODULE_LICENSE("GPL");