-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <lustre_disk.h>
#include <lustre_param.h>
-static int (*client_fill_super)(struct super_block *sb) = NULL;
+static int (*client_fill_super)(struct super_block *sb,
+ struct vfsmount *mnt) = NULL;
static void (*kill_super_cb)(struct super_block *sb) = NULL;
/*********** mount lookup *********/
-DECLARE_MUTEX(lustre_mount_info_lock);
+CFS_DEFINE_MUTEX(lustre_mount_info_lock);
static CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct lustre_mount_info *lmi;
ENTRY;
- list_for_each(tmp, &server_mount_info_list) {
- lmi = list_entry(tmp, struct lustre_mount_info, lmi_list_chain);
+ cfs_list_for_each(tmp, &server_mount_info_list) {
+ lmi = cfs_list_entry(tmp, struct lustre_mount_info,
+ lmi_list_chain);
if (strcmp(name, lmi->lmi_name) == 0)
RETURN(lmi);
}
}
strcpy(name_cp, name);
- down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
if (server_find_mount(name)) {
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
OBD_FREE(lmi, sizeof(*lmi));
OBD_FREE(name_cp, strlen(name) + 1);
CERROR("Already registered %s\n", name);
lmi->lmi_name = name_cp;
lmi->lmi_sb = sb;
lmi->lmi_mnt = mnt;
- list_add(&lmi->lmi_list_chain, &server_mount_info_list);
+ cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
RETURN(0);
}
struct lustre_mount_info *lmi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
if (!lmi) {
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
CERROR("%s not registered\n", name);
RETURN(-ENOENT);
}
CDEBUG(D_MOUNT, "dereg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
- list_del(&lmi->lmi_list_chain);
+ cfs_list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
RETURN(0);
}
struct lustre_sb_info *lsi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(NULL);
}
lsi = s2lsi(lmi->lmi_sb);
mntget(lmi->lmi_mnt);
- atomic_inc(&lsi->lsi_mounts);
+ cfs_atomic_inc(&lsi->lsi_mounts);
CDEBUG(D_MOUNT, "get_mnt %p from %s, refs=%d, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts),
- atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts),
+ mnt_get_count(lmi->lmi_mnt));
RETURN(lmi);
}
struct lustre_mount_info *lmi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi)
CERROR("Can't find mount for %s\n", name);
static void unlock_mntput(struct vfsmount *mnt)
{
- if (kernel_locked()) {
- unlock_kernel();
- mntput(mnt);
- lock_kernel();
- } else {
- mntput(mnt);
- }
+#ifdef HAVE_KERNEL_LOCKED
+ /* for kernel < 2.6.37 */
+ if (kernel_locked()) {
+ unlock_kernel();
+ mntput(mnt);
+ lock_kernel();
+ } else {
+ mntput(mnt);
+ }
+#else
+ mntput(mnt);
+#endif
}
static int lustre_put_lsi(struct super_block *sb);
{
struct lustre_mount_info *lmi;
struct lustre_sb_info *lsi;
- int count = atomic_read(&mnt->mnt_count) - 1;
+ int count = mnt_get_count(mnt) - 1;
ENTRY;
/* This might be the last one, can't deref after this */
unlock_mntput(mnt);
- down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(-ENOENT);
LASSERT(lmi->lmi_mnt == mnt);
CDEBUG(D_MOUNT, "put_mnt %p from %s, refs=%d, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts), count);
+ lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts), count);
if (lustre_put_lsi(lmi->lmi_sb)) {
CDEBUG(D_MOUNT, "Last put of mnt %p from %s, vfscount=%d\n",
#endif
static int ldd_parse(struct lvfs_run_ctxt *mount_ctxt,
- struct lustre_disk_data *ldd)
+ struct lustre_disk_data *ldd)
{
struct lvfs_run_ctxt saved;
struct file *file;
len = i_size_read(file->f_dentry->d_inode);
CDEBUG(D_MOUNT, "Have %s, size %lu\n", MOUNT_DATA_FILE, len);
if (len != sizeof(*ldd)) {
- CERROR("disk data size does not match: see %lu expect "LPSZ"\n",
- len, sizeof(*ldd));
+ CERROR("disk data size does not match: see %lu expect %u\n",
+ len, (int)sizeof(*ldd));
GOTO(out_close, rc = -EINVAL);
}
push_ctxt(&saved, mount_ctxt, NULL);
- file = filp_open(MOUNT_DATA_FILE, O_RDWR, 0644);
+ file = filp_open(MOUNT_DATA_FILE, O_RDWR|O_SYNC, 0644);
if (IS_ERR(file)) {
rc = PTR_ERR(file);
CERROR("cannot open %s: rc = %d\n", MOUNT_DATA_FILE, rc);
/**************** config llog ********************/
-/* Get a config log from the MGS and process it.
- This func is called for both clients and servers.
- Continue to process new statements appended to the logs
- (whenever the config lock is revoked) until lustre_end_log
- is called. */
+/** Get a config log from the MGS and process it.
+ * This func is called for both clients and servers.
+ * Continue to process new statements appended to the logs
+ * (whenever the config lock is revoked) until lustre_end_log
+ * is called.
+ * @param sb The superblock is used by the MGC to write to the local copy of
+ * the config log
+ * @param logname The name of the llog to replicate from the MGS
+ * @param cfg Since the same mgc may be used to follow multiple config logs
+ * (e.g. ost1, ost2, client), the config_llog_instance keeps the state for
+ * this log, and is added to the mgc's list of logs to follow.
+ */
int lustre_process_log(struct super_block *sb, char *logname,
struct config_llog_instance *cfg)
{
struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
+ struct lustre_cfg_bufs *bufs;
struct lustre_sb_info *lsi = s2lsi(sb);
struct obd_device *mgc = lsi->lsi_mgc;
int rc;
LASSERT(mgc);
LASSERT(cfg);
+ OBD_ALLOC_PTR(bufs);
+ if (bufs == NULL)
+ RETURN(-ENOMEM);
+
/* mgc_process_config */
- lustre_cfg_bufs_reset(&bufs, mgc->obd_name);
- lustre_cfg_bufs_set_string(&bufs, 1, logname);
- lustre_cfg_bufs_set(&bufs, 2, cfg, sizeof(*cfg));
- lustre_cfg_bufs_set(&bufs, 3, &sb, sizeof(sb));
- lcfg = lustre_cfg_new(LCFG_LOG_START, &bufs);
+ lustre_cfg_bufs_reset(bufs, mgc->obd_name);
+ lustre_cfg_bufs_set_string(bufs, 1, logname);
+ lustre_cfg_bufs_set(bufs, 2, cfg, sizeof(*cfg));
+ lustre_cfg_bufs_set(bufs, 3, &sb, sizeof(sb));
+ lcfg = lustre_cfg_new(LCFG_LOG_START, bufs);
rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
lustre_cfg_free(lcfg);
+ OBD_FREE_PTR(bufs);
+
if (rc == -EINVAL)
LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s'"
"failed from the MGS (%d). Make sure this "
/**************** obd start *******************/
+/** lustre_cfg_bufs are a holdover from 1.4; we can still set these up from
+ * lctl (and do for echo cli/srv.
+ */
int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd,
char *s1, char *s2, char *s3, char *s4)
{
return(rc);
}
+/** Call class_attach and class_setup. These methods in turn call
+ * obd type-specific methods.
+ */
static int lustre_start_simple(char *obdname, char *type, char *uuid,
char *s1, char *s2)
{
RETURN(rc);
}
-DECLARE_MUTEX(mgc_start_lock);
+CFS_DEFINE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
struct obd_uuid *uuid;
class_uuid_t uuidc;
lnet_nid_t nid;
- char *mgcname, *niduuid, *mgssec;
+ char *mgcname = NULL, *niduuid = NULL, *mgssec = NULL;
char *ptr;
int recov_bk;
int rc = 0, i = 0, j, len;
RETURN(-EINVAL);
}
+ cfs_mutex_lock(&mgc_start_lock);
+
len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
OBD_ALLOC(mgcname, len);
OBD_ALLOC(niduuid, len + 2);
mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
- mutex_down(&mgc_start_lock);
+ OBD_ALLOC_PTR(data);
+ if (data == NULL)
+ GOTO(out_free, rc = -ENOMEM);
obd = class_name2obd(mgcname);
if (obd && !obd->obd_stopping) {
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
strlen(KEY_MGSSEC), KEY_MGSSEC,
strlen(mgssec), mgssec, NULL);
if (rc)
GOTO(out_free, rc);
/* Re-using an existing MGC */
- atomic_inc(&obd->u.cli.cl_mgc_refcount);
+ cfs_atomic_inc(&obd->u.cli.cl_mgc_refcount);
+
+ /* IR compatibility check, only for clients */
+ if (lmd_is_client(lsi->lsi_lmd)) {
+ int has_ir;
+ int vallen = sizeof(*data);
+ __u32 *flags = &lsi->lsi_lmd->lmd_flags;
+
+ rc = obd_get_info(NULL, obd->obd_self_export,
+ strlen(KEY_CONN_DATA), KEY_CONN_DATA,
+ &vallen, data, NULL);
+ LASSERT(rc == 0);
+ has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
+ if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
+ /* LMD_FLG_NOIR is for test purpose only */
+ LCONSOLE_WARN(
+ "Trying to mount a client with IR setting "
+ "not compatible with current mgc. "
+ "Force to use current mgc setting that is "
+ "IR %s.\n",
+ has_ir ? "enabled" : "disabled");
+ if (has_ir)
+ *flags &= ~LMD_FLG_NOIR;
+ else
+ *flags |= LMD_FLG_NOIR;
+ }
+ }
recov_bk = 0;
/* If we are restarting the MGS, don't try to keep the MGC's
if at all possible. */
recov_bk++;
CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,recov_bk);
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
sizeof(KEY_INIT_RECOV_BACKUP),
KEY_INIT_RECOV_BACKUP,
sizeof(recov_bk), &recov_bk, NULL);
GOTO(out_free, rc = -ENOTCONN);
}
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
strlen(KEY_MGSSEC), KEY_MGSSEC,
strlen(mgssec), mgssec, NULL);
if (rc)
/* Keep a refcount of servers/clients who started with "mount",
so we know when we can get rid of the mgc. */
- atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
+ cfs_atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
/* Try all connections, but only once. */
recov_bk = 1;
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
sizeof(KEY_INIT_RECOV_BACKUP),
KEY_INIT_RECOV_BACKUP,
sizeof(recov_bk), &recov_bk, NULL);
/* nonfatal */
CWARN("can't set %s %d\n", KEY_INIT_RECOV_BACKUP, rc);
/* We connect to the MGS at setup, and don't disconnect until cleanup */
- OBD_ALLOC_PTR(data);
- if (data == NULL)
- GOTO(out, rc = -ENOMEM);
data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_FID |
- OBD_CONNECT_AT;
+ OBD_CONNECT_AT | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_IMP_RECOV;
+ if (lmd_is_client(lsi->lsi_lmd) &&
+ lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)
+ data->ocd_connect_flags &= ~OBD_CONNECT_IMP_RECOV;
data->ocd_version = LUSTRE_VERSION_CODE;
rc = obd_connect(NULL, &exp, obd, &(obd->obd_uuid), data, NULL);
- OBD_FREE_PTR(data);
if (rc) {
CERROR("connect failed %d\n", rc);
GOTO(out, rc);
to the same mgc.*/
lsi->lsi_mgc = obd;
out_free:
- mutex_up(&mgc_start_lock);
+ cfs_mutex_unlock(&mgc_start_lock);
+ if (data)
+ OBD_FREE_PTR(data);
if (mgcname)
OBD_FREE(mgcname, len);
if (niduuid)
RETURN(-ENOENT);
lsi->lsi_mgc = NULL;
- mutex_down(&mgc_start_lock);
- LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
- if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
+ cfs_mutex_lock(&mgc_start_lock);
+ LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
+ if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
will call in here. */
CDEBUG(D_MOUNT, "mgc still has %d references.\n",
- atomic_read(&obd->u.cli.cl_mgc_refcount));
+ cfs_atomic_read(&obd->u.cli.cl_mgc_refcount));
GOTO(out, rc = -EBUSY);
}
OBD_FREE(niduuid, len);
/* class_import_put will get rid of the additional connections */
- mutex_up(&mgc_start_lock);
+ cfs_mutex_unlock(&mgc_start_lock);
RETURN(rc);
}
CDEBUG(D_MOUNT, "Set mgc disk for %s\n", lsi->lsi_lmd->lmd_dev);
/* cl_mgc_sem in mgc insures we sleep if the mgc_fs is busy */
- rc = obd_set_info_async(mgc->obd_self_export,
+ rc = obd_set_info_async(NULL, mgc->obd_self_export,
sizeof(KEY_SET_FS), KEY_SET_FS,
sizeof(*sb), sb, NULL);
if (rc) {
CDEBUG(D_MOUNT, "Unassign mgc disk\n");
- rc = obd_set_info_async(mgc->obd_self_export,
+ rc = obd_set_info_async(NULL, mgc->obd_self_export,
sizeof(KEY_CLEAR_FS), KEY_CLEAR_FS,
0, NULL, NULL);
RETURN(rc);
}
-DECLARE_MUTEX(server_start_lock);
+CFS_DEFINE_MUTEX(server_start_lock);
/* Stop MDS/OSS if nobody is using them */
static int server_stop_servers(int lddflags, int lsiflags)
int rc = 0;
ENTRY;
- mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
/* Either an MDT or an OST or neither */
/* if this was an MDT, and there are no more MDT's, clean up the MDS */
rc = err;
}
- mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
RETURN(rc);
}
struct lustre_sb_info *lsi = s2lsi(sb);
struct lustre_disk_data *ldd = lsi->lsi_ldd;
lnet_process_id_t id;
- int i = 0;
+ int i = 0;
ENTRY;
if (!(lsi->lsi_flags & LSI_SERVER))
while (LNetGetId(i++, &id) != -ENOENT) {
if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
continue;
+
+ /* server use --servicenode param, only allow specified
+ * nids be registered */
+ if ((ldd->ldd_flags & LDD_F_NO_PRIMNODE) != 0 &&
+ class_match_nid(ldd->ldd_params,
+ PARAM_FAILNODE, id.nid) < 1)
+ continue;
+
+ /* match specified network */
+ if (!class_match_net(ldd->ldd_params,
+ PARAM_NETWORK, LNET_NIDNET(id.nid)))
+ continue;
+
mti->mti_nids[mti->mti_nid_count] = id.nid;
mti->mti_nid_count++;
if (mti->mti_nid_count >= MTI_NIDS_MAX) {
mti->mti_lustre_ver = LUSTRE_VERSION_CODE;
mti->mti_config_ver = 0;
+ if (lsi->lsi_lmd->lmd_flags & LMD_FLG_WRITECONF)
+ ldd->ldd_flags |= LDD_F_WRITECONF;
mti->mti_flags = ldd->ldd_flags;
mti->mti_stripe_index = ldd->ldd_svindex;
memcpy(mti->mti_uuid, ldd->ldd_uuid, sizeof(mti->mti_uuid));
struct obd_device *mgc = lsi->lsi_mgc;
struct lustre_disk_data *ldd = lsi->lsi_ldd;
struct mgs_target_info *mti = NULL;
+ bool writeconf;
int rc;
ENTRY;
libcfs_nid2str(mti->mti_nids[0]), mti->mti_stripe_index,
mti->mti_flags);
+ /* if write_conf is true, the registration must succeed */
+ writeconf = !!(ldd->ldd_flags & (LDD_F_NEED_INDEX | LDD_F_UPDATE));
+ mti->mti_flags |= LDD_F_OPC_REG;
+
/* Register the target */
/* FIXME use mgc_process_config instead */
- rc = obd_set_info_async(mgc->u.cli.cl_mgc_mgsexp,
+ rc = obd_set_info_async(NULL, mgc->u.cli.cl_mgc_mgsexp,
sizeof(KEY_REGISTER_TARGET), KEY_REGISTER_TARGET,
sizeof(*mti), mti, NULL);
- if (rc)
+ if (rc) {
+ if (mti->mti_flags & LDD_F_ERROR) {
+ LCONSOLE_ERROR_MSG(0x160,
+ "The MGS is refusing to allow this "
+ "server (%s) to start. Please see messages"
+ " on the MGS node.\n", ldd->ldd_svname);
+ } else if (writeconf) {
+ LCONSOLE_ERROR_MSG(0x15f,
+ "Communication to the MGS return error %d. "
+ "Is the MGS running?\n", rc);
+ } else {
+ CERROR("Cannot talk to the MGS: %d, not fatal\n", rc);
+ /* reset the error code for non-fatal error. */
+ rc = 0;
+ }
GOTO(out, rc);
+ }
/* Always update our flags */
- ldd->ldd_flags = mti->mti_flags & ~LDD_F_REWRITE_LDD;
+ ldd->ldd_flags = mti->mti_flags & LDD_F_ONDISK_MASK;
/* If this flag is set, it means the MGS wants us to change our
on-disk data. (So far this means just the index.) */
RETURN(rc);
}
-/* Start targets */
+/**
+ * Notify the MGS that this target is ready.
+ * Used by IR - if the MGS receives this message, it will notify clients.
+ * \param sb  superblock of a server target (must have LSI_SERVER set)
+ * \param obd target obd device; its obt_instance is reported to the MGS
+ * \retval 0 on success, negative errno on failure
+ */
+static int server_notify_target(struct super_block *sb, struct obd_device *obd)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct obd_device *mgc = lsi->lsi_mgc;
+ struct mgs_target_info *mti = NULL;
+ int rc;
+ ENTRY;
+
+ LASSERT(mgc);
+
+ /* Only server mounts can report readiness to the MGS. */
+ if (!(lsi->lsi_flags & LSI_SERVER))
+ RETURN(-EINVAL);
+
+ /* Build the target info from the on-disk data in the superblock. */
+ OBD_ALLOC_PTR(mti);
+ if (!mti)
+ RETURN(-ENOMEM);
+ rc = server_sb2mti(sb, mti);
+ if (rc)
+ GOTO(out, rc);
+
+ /* NOTE(review): obt_instance presumably identifies this start of the
+ * target so the MGS/IR can detect restarts -- confirm against
+ * mgs_target_info users. */
+ mti->mti_instance = obd->u.obt.obt_instance;
+ mti->mti_flags |= LDD_F_OPC_READY;
+
+ /* FIXME use mgc_process_config instead */
+ rc = obd_set_info_async(NULL, mgc->u.cli.cl_mgc_mgsexp,
+ sizeof(KEY_REGISTER_TARGET),
+ KEY_REGISTER_TARGET,
+ sizeof(*mti), mti, NULL);
+
+ /* Imperative recovery: if the mgs informs us to use IR? */
+ if (!rc && !(mti->mti_flags & LDD_F_ERROR) &&
+ (mti->mti_flags & LDD_F_IR_CAPABLE))
+ lsi->lsi_flags |= LSI_IR_CAPABLE;
+
+out:
+ if (mti)
+ OBD_FREE_PTR(mti);
+ RETURN(rc);
+
+}
+
+
+/** Start server targets: MDTs and OSTs
+ */
static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
{
struct obd_device *obd;
/* If we're an MDT, make sure the global MDS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
/* make sure the MDS is started */
- mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_MDS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
LUSTRE_MDS_OBDNAME"_uuid",
0, 0);
if (rc) {
- mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
CERROR("failed to start MDS: %d\n", rc);
RETURN(rc);
}
}
- mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
}
#endif
/* If we're an OST, make sure the global OSS is running */
- if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_OST) {
+ if (IS_OST(lsi->lsi_ldd)) {
/* make sure OSS is started */
- mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_OSS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
LUSTRE_OSS_OBDNAME"_uuid",
0, 0);
if (rc) {
- mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
CERROR("failed to start OSS: %d\n", rc);
RETURN(rc);
}
}
- mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
}
- /* Set the mgc fs to our server disk. This allows the MGC
- to read and write configs locally. */
+ /* Set the mgc fs to our server disk. This allows the MGC to
+ * read and write configs locally, in case it can't talk to the MGS. */
rc = server_mgc_set_fs(lsi->lsi_mgc, sb);
if (rc)
RETURN(rc);
/* Register with MGS */
rc = server_register_target(sb);
- if (rc && (lsi->lsi_ldd->ldd_flags &
- (LDD_F_NEED_INDEX | LDD_F_UPDATE | LDD_F_UPGRADE14))){
- CERROR("Required registration failed for %s: %d\n",
- lsi->lsi_ldd->ldd_svname, rc);
- if (rc == -EIO) {
- LCONSOLE_ERROR_MSG(0x15f, "Communication error with "
- "the MGS. Is the MGS running?\n");
- }
- GOTO(out_mgc, rc);
- }
- if (rc == -EINVAL) {
- LCONSOLE_ERROR_MSG(0x160, "The MGS is refusing to allow this "
- "server (%s) to start. Please see messages"
- " on the MGS node.\n",
- lsi->lsi_ldd->ldd_svname);
- GOTO(out_mgc, rc);
- }
- /* non-fatal error of registeration with MGS */
if (rc)
- CDEBUG(D_MOUNT, "Cannot register with MGS: %d\n", rc);
+ GOTO(out_mgc, rc);
/* Let the target look up the mount using the target's name
(we can't pass the sb or mnt through class_process_config.) */
obd->obd_self_export, 0, NULL, NULL);
}
+ server_notify_target(sb, obd);
+
+ /* calculate recovery timeout, do it after lustre_process_log */
+ server_calc_timeout(lsi, obd);
+
/* log has been fully processed */
obd_notify(obd, NULL, OBD_NOTIFY_CONFIG, (void *)CONFIG_LOG);
}
}
lsi->lsi_lmd->lmd_exclude_count = 0;
+ lsi->lsi_lmd->lmd_recovery_time_soft = 0;
+ lsi->lsi_lmd->lmd_recovery_time_hard = 0;
s2lsi_nocast(sb) = lsi;
/* we take 1 extra ref for our setup */
- atomic_set(&lsi->lsi_mounts, 1);
+ cfs_atomic_set(&lsi->lsi_mounts, 1);
/* Default umount style */
lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
/* someone didn't call server_put_mount. */
- LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
+ LASSERT(cfs_atomic_read(&lsi->lsi_mounts) == 0);
if (lsi->lsi_ldd != NULL)
OBD_FREE(lsi->lsi_ldd, sizeof(*lsi->lsi_ldd));
LASSERT(lsi != NULL);
- CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
- if (atomic_dec_and_test(&lsi->lsi_mounts)) {
+ CDEBUG(D_MOUNT, "put %p %d\n", sb, cfs_atomic_read(&lsi->lsi_mounts));
+ if (cfs_atomic_dec_and_test(&lsi->lsi_mounts)) {
lustre_free_lsi(sb);
RETURN(1);
}
/*************** server mount ******************/
-/* Kernel mount using mount options in MOUNT_DATA_FILE */
+/** Kernel mount using mount options in MOUNT_DATA_FILE.
+ * Since this file lives on the disk, we pre-mount using a common
+ * type, read the file, then re-mount using the type specified in the
+ * file.
+ */
static struct vfsmount *server_kernel_mount(struct super_block *sb)
{
struct lvfs_run_ctxt mount_ctxt;
struct lustre_disk_data *ldd;
struct lustre_mount_data *lmd = lsi->lsi_lmd;
struct vfsmount *mnt;
+ struct file_system_type *type;
char *options = NULL;
unsigned long page, s_flags;
struct page *__page;
+ int len;
int rc;
ENTRY;
/* Pre-mount ldiskfs to read the MOUNT_DATA_FILE */
CDEBUG(D_MOUNT, "Pre-mount ldiskfs %s\n", lmd->lmd_dev);
- mnt = ll_kern_mount("ldiskfs", s_flags, lmd->lmd_dev, (void *)options);
+ type = get_fs_type("ldiskfs");
+ if (!type) {
+ CERROR("premount failed: cannot find ldiskfs module\n");
+ GOTO(out_free, rc = -ENODEV);
+ }
+ mnt = vfs_kern_mount(type, s_flags, lmd->lmd_dev, (void *)options);
+ cfs_module_put(type->owner);
if (IS_ERR(mnt)) {
rc = PTR_ERR(mnt);
CERROR("premount %s:%#lx ldiskfs failed: %d "
memset(options, 0, CFS_PAGE_SIZE);
strncpy(options, ldd->ldd_mount_opts, CFS_PAGE_SIZE - 2);
+ len = CFS_PAGE_SIZE - strlen(options) - 2;
+ if (*options != 0)
+ strcat(options, ",");
+ strncat(options, "no_mbcache", len);
+
/* Add in any mount-line options */
if (lmd->lmd_opts && (*(lmd->lmd_opts) != 0)) {
- int len = CFS_PAGE_SIZE - strlen(options) - 2;
- if (*options != 0)
- strcat(options, ",");
+ len = CFS_PAGE_SIZE - strlen(options) - 2;
+ strcat(options, ",");
strncat(options, lmd->lmd_opts, len);
}
CDEBUG(D_MOUNT, "kern_mount: %s %s %s\n",
MT_STR(ldd), lmd->lmd_dev, options);
- mnt = ll_kern_mount(MT_STR(ldd), s_flags, lmd->lmd_dev,
- (void *)options);
+ type = get_fs_type(MT_STR(ldd));
+ if (!type) {
+ CERROR("get_fs_type failed\n");
+ GOTO(out_free, rc = -ENODEV);
+ }
+ mnt = vfs_kern_mount(type, s_flags, lmd->lmd_dev, (void *)options);
+ cfs_module_put(type->owner);
if (IS_ERR(mnt)) {
rc = PTR_ERR(mnt);
- CERROR("ll_kern_mount failed: rc = %d\n", rc);
+ CERROR("vfs_kern_mount failed: rc = %d\n", rc);
GOTO(out_free, rc);
}
RETURN(ERR_PTR(rc));
}
+/** Wait here forever until the mount refcount is 0 before completing umount,
+ * else we risk dereferencing a null pointer.
+ * LNET may take e.g. 165s before killing zombies.
+ * \param mnt the server vfsmount being torn down
+ */
static void server_wait_finished(struct vfsmount *mnt)
{
- wait_queue_head_t waitq;
- struct l_wait_info lwi;
- int retries = 330;
-
- init_waitqueue_head(&waitq);
-
- while ((atomic_read(&mnt->mnt_count) > 1) && (retries > 0)) {
- LCONSOLE_WARN("%s: Mount still busy with %d refs, waiting for "
- "%d secs...\n", mnt->mnt_devname,
- atomic_read(&mnt->mnt_count), retries);
-
- /* Wait for a bit */
- retries -= 5;
- lwi = LWI_TIMEOUT(5 * HZ, NULL, NULL);
- l_wait_event(waitq, 0, &lwi);
- }
- if (atomic_read(&mnt->mnt_count) > 1) {
- CERROR("%s: Mount still busy (%d refs), giving up.\n",
- mnt->mnt_devname, atomic_read(&mnt->mnt_count));
- }
+ cfs_waitq_t waitq;
+ int rc, waited = 0;
+ cfs_sigset_t blocked;
+
+ cfs_waitq_init(&waitq);
+
+ /* Poll in 3s intervals; complain once every 30s. */
+ while (mnt_get_count(mnt) > 1) {
+ if (waited && (waited % 30 == 0))
+ LCONSOLE_WARN("Mount still busy with %d refs after "
+ "%d secs.\n",
+ mnt_get_count(mnt),
+ waited);
+ /* Cannot use l_event_wait() for an interruptible sleep. */
+ waited += 3;
+ /* NOTE(review): cfs_block_sigsinv(sigmask(SIGKILL)) appears
+ * to block every signal except SIGKILL, so only a kill can
+ * interrupt the wait below -- confirm the libcfs semantics. */
+ blocked = cfs_block_sigsinv(sigmask(SIGKILL));
+ cfs_waitq_wait_event_interruptible_timeout(
+ waitq,
+ (mnt_get_count(mnt) == 1),
+ cfs_time_seconds(3),
+ rc);
+ cfs_restore_sigs(blocked);
+ /* Interrupted: give up waiting even though refs remain. */
+ if (rc < 0) {
+ LCONSOLE_EMERG("Danger: interrupted umount %s with "
+ "%d refs!\n", mnt->mnt_devname,
+ mnt_get_count(mnt));
+ break;
+ }
+
+ }
}
+/** Start the shutdown of servers at umount.
+ */
static void server_put_super(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
OBD_ALLOC(tmpname, tmpname_sz);
memcpy(tmpname, lsi->lsi_ldd->ldd_svname, tmpname_sz);
CDEBUG(D_MOUNT, "server put_super %s\n", tmpname);
+ if (IS_MDT(lsi->lsi_ldd) && (lsi->lsi_lmd->lmd_flags & LMD_FLG_NOSVC))
+ snprintf(tmpname, tmpname_sz, "MGS");
/* Stop the target */
if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOSVC) &&
EXIT;
}
+/** Called only for 'umount -f'
+ */
#ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
static void server_umount_begin(struct vfsmount *vfsmnt, int flags)
{
}
#ifndef HAVE_STATFS_DENTRY_PARAM
-static int server_statfs (struct super_block *sb, struct kstatfs *buf)
+static int server_statfs (struct super_block *sb, cfs_kstatfs_t *buf)
{
#else
-static int server_statfs (struct dentry *dentry, struct kstatfs *buf)
+static int server_statfs (struct dentry *dentry, cfs_kstatfs_t *buf)
{
struct super_block *sb = dentry->d_sb;
#endif
RETURN(0);
}
+/** The operations we support directly on the superblock:
+ * mount, umount, and df.
+ */
static struct super_operations server_ops =
{
.put_super = server_put_super,
.statfs = server_statfs,
};
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
#define LUSTRE_SUPER_MAGIC 0x0BD00BD1
static int server_fill_super_common(struct super_block *sb)
sb->s_blocksize = 4096;
sb->s_blocksize_bits = log2(sb->s_blocksize);
sb->s_magic = LUSTRE_SUPER_MAGIC;
- sb->s_maxbytes = 0; //PAGE_CACHE_MAXBYTES;
+ sb->s_maxbytes = 0; /* we don't allow file IO on server mountpoints */
sb->s_flags |= MS_RDONLY;
sb->s_op = &server_ops;
RETURN(0);
}
+/** Fill in the superblock info for a Lustre server.
+ * Mount the device with the correct options.
+ * Read the on-disk config file.
+ * Start the services.
+ */
static int server_fill_super(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
GOTO(out_mnt, rc);
}
+ /* Start MGC before servers */
rc = lustre_start_mgc(sb);
if (rc)
GOTO(out_mnt, rc);
if (rc)
GOTO(out_mnt, rc);
- LCONSOLE_WARN("Server %s on device %s has started\n",
- lsi->lsi_ldd->ldd_svname, lsi->lsi_lmd->lmd_dev);
-
RETURN(0);
out_mnt:
/* We jump here in case of failure while starting targets or MGS.
return rc;
}
+/*
+ * Calculate timeout value for a target.
+ * Mount-line options (recovery_time_soft/hard) override the compile-time
+ * defaults.  When imperative recovery is usable, the recovery window is
+ * scaled down by ir_factor, but soft is never reduced below
+ * OBD_RECOVERY_TIME_MIN and hard never below soft.
+ * \param lsi superblock info; must be a server mount (LSI_SERVER asserted)
+ * \param obd target obd whose recovery timeout fields are updated in place
+ */
+void server_calc_timeout(struct lustre_sb_info *lsi, struct obd_device *obd)
+{
+ struct lustre_mount_data *lmd;
+ int soft = 0;
+ int hard = 0;
+ int factor = 0;
+ bool has_ir = !!(lsi->lsi_flags & LSI_IR_CAPABLE);
+ int min = OBD_RECOVERY_TIME_MIN;
+
+ LASSERT(lsi->lsi_flags & LSI_SERVER);
+
+ lmd = lsi->lsi_lmd;
+ if (lmd) {
+ soft = lmd->lmd_recovery_time_soft;
+ hard = lmd->lmd_recovery_time_hard;
+ /* "noir" mount option (test-only) disables IR for the target */
+ has_ir = has_ir && !(lmd->lmd_flags & LMD_FLG_NOIR);
+ obd->obd_no_ir = !has_ir;
+ }
+
+ /* 0 means "not set on the mount line": fall back to defaults */
+ if (soft == 0)
+ soft = OBD_RECOVERY_TIME_SOFT;
+ if (hard == 0)
+ hard = OBD_RECOVERY_TIME_HARD;
+
+ /* target may have ir_factor configured. */
+ factor = OBD_IR_FACTOR_DEFAULT;
+ if (obd->obd_recovery_ir_factor)
+ factor = obd->obd_recovery_ir_factor;
+
+ if (has_ir) {
+ int new_soft = soft;
+ int new_hard = hard;
+
+ /* adjust timeout value by imperative recovery */
+
+ new_soft = (soft * factor) / OBD_IR_FACTOR_MAX;
+ new_hard = (hard * factor) / OBD_IR_FACTOR_MAX;
+
+ /* make sure the timeout is not too short */
+ new_soft = max(min, new_soft);
+ new_hard = max(new_soft, new_hard);
+
+ LCONSOLE_INFO("%s: Imperative Recovery enabled, recovery "
+ "window shrunk from %d-%d down to %d-%d\n",
+ obd->obd_name, soft, hard, new_soft, new_hard);
+
+ soft = new_soft;
+ hard = new_hard;
+ }
+
+ /* we're done; keep any larger soft timeout already in effect */
+ obd->obd_recovery_timeout = max(obd->obd_recovery_timeout, soft);
+ obd->obd_recovery_time_hard = hard;
+ obd->obd_recovery_ir_factor = factor;
+}
+EXPORT_SYMBOL(server_calc_timeout);
+
/*************** mount common betweeen server and client ***************/
/* Common umount */
RETURN(rc);
}
-#if 0
static void lmd_print(struct lustre_mount_data *lmd)
{
int i;
PRINT_CMD(PRINT_MASK, "profile: %s\n", lmd->lmd_profile);
PRINT_CMD(PRINT_MASK, "device: %s\n", lmd->lmd_dev);
PRINT_CMD(PRINT_MASK, "flags: %x\n", lmd->lmd_flags);
+
if (lmd->lmd_opts)
PRINT_CMD(PRINT_MASK, "options: %s\n", lmd->lmd_opts);
+
+ if (lmd->lmd_recovery_time_soft)
+ PRINT_CMD(PRINT_MASK, "recovery time soft: %d\n",
+ lmd->lmd_recovery_time_soft);
+
+ if (lmd->lmd_recovery_time_hard)
+ PRINT_CMD(PRINT_MASK, "recovery time hard: %d\n",
+ lmd->lmd_recovery_time_hard);
+
for (i = 0; i < lmd->lmd_exclude_count; i++) {
PRINT_CMD(PRINT_MASK, "exclude %d: OST%04x\n", i,
lmd->lmd_exclude[i]);
}
}
-#endif
/* Is this server on the exclusion list */
int lustre_check_exclusion(struct super_block *sb, char *svname)
return 0;
}
-/* mount -v -t lustre uml1:uml2:/lustre-client /mnt/lustre */
+/** Parse mount line options
+ * e.g. mount -v -t lustre -o abort_recov uml1:uml2:/lustre-client /mnt/lustre
+ * dev is passed as device=uml1:/lustre by mount.lustre
+ */
static int lmd_parse(char *options, struct lustre_mount_data *lmd)
{
char *s1, *s2, *devname = NULL;
s1 = options;
while (*s1) {
int clear = 0;
+ int time_min = OBD_RECOVERY_TIME_MIN;
+
/* Skip whitespace and extra commas */
while (*s1 == ' ' || *s1 == ',')
s1++;
if (strncmp(s1, "abort_recov", 11) == 0) {
lmd->lmd_flags |= LMD_FLG_ABORT_RECOV;
clear++;
+ } else if (strncmp(s1, "recovery_time_soft=", 19) == 0) {
+ lmd->lmd_recovery_time_soft = max_t(int,
+ simple_strtoul(s1 + 19, NULL, 10), time_min);
+ clear++;
+ } else if (strncmp(s1, "recovery_time_hard=", 19) == 0) {
+ lmd->lmd_recovery_time_hard = max_t(int,
+ simple_strtoul(s1 + 19, NULL, 10), time_min);
+ clear++;
+ } else if (strncmp(s1, "noir", 4) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOIR; /* test purpose only. */
+ clear++;
} else if (strncmp(s1, "nosvc", 5) == 0) {
lmd->lmd_flags |= LMD_FLG_NOSVC;
clear++;
} else if (strncmp(s1, "nomgs", 5) == 0) {
lmd->lmd_flags |= LMD_FLG_NOMGS;
clear++;
+ } else if (strncmp(s1, "noscrub", 7) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOSCRUB;
+ clear++;
+ } else if (strncmp(s1, "writeconf", 9) == 0) {
+ lmd->lmd_flags |= LMD_FLG_WRITECONF;
+ clear++;
} else if (strncmp(s1, "mgssec=", 7) == 0) {
rc = lmd_parse_mgssec(lmd, s1 + 7);
if (rc)
s1 = strstr(devname, ":/");
if (s1) {
++s1;
- lmd->lmd_flags = LMD_FLG_CLIENT;
+ lmd->lmd_flags |= LMD_FLG_CLIENT;
/* Remove leading /s from fsname */
while (*++s1 == '/') ;
/* Freed in lustre_free_lsi */
strcpy(lmd->lmd_opts, options);
}
+ lmd_print(lmd);
lmd->lmd_magic = LMD_MAGIC;
RETURN(rc);
RETURN(-EINVAL);
}
+/* Bundles the raw mount options with the vfsmount so both fit through
+ * get_sb_nodev()'s single data pointer into lustre_fill_super(). */
+struct lustre_mount_data2 {
+ void *lmd2_data; /* mount options string, parsed by lmd_parse() */
+ struct vfsmount *lmd2_mnt; /* vfsmount, forwarded to client_fill_super */
+};
-/* Common mount */
+/** This is the entry point for the mount call into Lustre.
+ * This is called when a server or client is mounted,
+ * and this is where we start setting things up.
+ * @param data Mount options (e.g. -o flock,abort_recov)
+ */
int lustre_fill_super(struct super_block *sb, void *data, int silent)
{
struct lustre_mount_data *lmd;
+ struct lustre_mount_data2 *lmd2 = data;
struct lustre_sb_info *lsi;
int rc;
ENTRY;
* Disable lockdep during mount, because mount locking patterns are
* `special'.
*/
- lockdep_off();
+ cfs_lockdep_off();
+
+ /*
+ * LU-639: the obd cleanup of last mount may not finish yet, wait here.
+ */
+ obd_zombie_barrier();
/* Figure out the lmd from the mount options */
- if (lmd_parse((char *)data, lmd)) {
+ if (lmd_parse((char *)(lmd2->lmd2_data), lmd)) {
lustre_put_lsi(sb);
GOTO(out, rc = -EINVAL);
}
}
/* Connect and start */
/* (should always be ll_fill_super) */
- rc = (*client_fill_super)(sb);
+ rc = (*client_fill_super)(sb, lmd2->lmd2_mnt);
/* c_f_s will call lustre_common_put_super on failure */
}
} else {
CDEBUG(D_SUPER, "Mount %s complete\n",
lmd->lmd_dev);
}
- lockdep_on();
+ cfs_lockdep_on();
return rc;
}
/* We can't call ll_fill_super by name because it lives in a module that
must be loaded after this one. */
-void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb))
+void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
+ struct vfsmount *mnt))
{
client_fill_super = cfs;
}
/***************** FS registration ******************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
-struct super_block * lustre_get_sb(struct file_system_type *fs_type,
- int flags, const char *devname, void * data)
+struct super_block * lustre_get_sb(struct file_system_type *fs_type, int flags,
+ const char *devname, void * data)
{
return get_sb_nodev(fs_type, flags, data, lustre_fill_super);
}
#else
-int lustre_get_sb(struct file_system_type *fs_type,
- int flags, const char *devname, void * data,
- struct vfsmount *mnt)
+int lustre_get_sb(struct file_system_type *fs_type, int flags,
+ const char *devname, void * data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, lustre_fill_super, mnt);
+ struct lustre_mount_data2 lmd2 = {data, mnt};
+
+ return get_sb_nodev(fs_type, flags, &lmd2, lustre_fill_super, mnt);
}
#endif
kill_anon_super(sb);
}
+/** Register the "lustre" fs type
+ */
struct file_system_type lustre_fs_type = {
.owner = THIS_MODULE,
.name = "lustre",
.get_sb = lustre_get_sb,
.kill_sb = lustre_kill_super,
.fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
+#ifdef FS_HAS_FIEMAP
+ FS_HAS_FIEMAP |
+#endif
LL_RENAME_DOES_D_MOVE,
};