/*********** mount lookup *********/
-DECLARE_MUTEX(lustre_mount_info_lock);
+CFS_DECLARE_MUTEX(lustre_mount_info_lock);
static CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct lustre_mount_info *lmi;
ENTRY;
- list_for_each(tmp, &server_mount_info_list) {
- lmi = list_entry(tmp, struct lustre_mount_info, lmi_list_chain);
+ cfs_list_for_each(tmp, &server_mount_info_list) {
+ lmi = cfs_list_entry(tmp, struct lustre_mount_info,
+ lmi_list_chain);
if (strcmp(name, lmi->lmi_name) == 0)
RETURN(lmi);
}
}
strcpy(name_cp, name);
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
if (server_find_mount(name)) {
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
OBD_FREE(lmi, sizeof(*lmi));
OBD_FREE(name_cp, strlen(name) + 1);
CERROR("Already registered %s\n", name);
lmi->lmi_name = name_cp;
lmi->lmi_sb = sb;
lmi->lmi_mnt = mnt;
- list_add(&lmi->lmi_list_chain, &server_mount_info_list);
+ cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
RETURN(0);
}
struct lustre_mount_info *lmi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
if (!lmi) {
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
CERROR("%s not registered\n", name);
RETURN(-ENOENT);
}
CDEBUG(D_MOUNT, "dereg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
- list_del(&lmi->lmi_list_chain);
+ cfs_list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
RETURN(0);
}
struct lustre_sb_info *lsi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(NULL);
}
lsi = s2lsi(lmi->lmi_sb);
mntget(lmi->lmi_mnt);
- atomic_inc(&lsi->lsi_mounts);
-
+ cfs_atomic_inc(&lsi->lsi_mounts);
+
CDEBUG(D_MOUNT, "get_mnt %p from %s, refs=%d, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts),
- atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts),
+ cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
RETURN(lmi);
}
struct lustre_mount_info *lmi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
if (!lmi)
CERROR("Can't find mount for %s\n", name);
static void unlock_mntput(struct vfsmount *mnt)
{
if (kernel_locked()) {
- unlock_kernel();
+ cfs_unlock_kernel();
mntput(mnt);
- lock_kernel();
+ cfs_lock_kernel();
} else {
mntput(mnt);
}
/* This might be the last one, can't deref after this */
unlock_mntput(mnt);
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(-ENOENT);
LASSERT(lmi->lmi_mnt == mnt);
CDEBUG(D_MOUNT, "put_mnt %p from %s, refs=%d, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts), count);
+ lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts), count);
if (lustre_put_lsi(lmi->lmi_sb)) {
CDEBUG(D_MOUNT, "Last put of mnt %p from %s, vfscount=%d\n",
len = i_size_read(file->f_dentry->d_inode);
CDEBUG(D_MOUNT, "Have %s, size %lu\n", MOUNT_DATA_FILE, len);
if (len != sizeof(*ldd)) {
- CERROR("disk data size does not match: see %lu expect "LPSZ"\n",
- len, sizeof(*ldd));
+ CERROR("disk data size does not match: see %lu expect %u\n",
+ len, (int)sizeof(*ldd));
GOTO(out_close, rc = -EINVAL);
}
rc = server_register_mount(LUSTRE_MGS_OBDNAME, sb, mnt);
- if (!rc &&
- ((rc = lustre_start_simple(LUSTRE_MGS_OBDNAME, LUSTRE_MGS_NAME,
- LUSTRE_MGS_OBDNAME, 0, 0))))
- server_deregister_mount(LUSTRE_MGS_OBDNAME);
+ if (!rc) {
+ rc = lustre_start_simple(LUSTRE_MGS_OBDNAME, LUSTRE_MGS_NAME,
+ LUSTRE_MGS_OBDNAME, 0, 0);
+ /* Do NOT call server_deregister_mount() here. This makes it
+ * impossible to clean up cleanly and free lsi and other stuff when
+ * mgs calls server_put_mount() in the error handling case. -umka */
+ }
if (rc)
LCONSOLE_ERROR_MSG(0x15e, "Failed to start MGS '%s' (%d). "
RETURN(rc);
}
-DECLARE_MUTEX(mgc_start_lock);
+CFS_DECLARE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
*/
static int lustre_start_mgc(struct super_block *sb)
{
- struct lustre_handle mgc_conn = {0, };
struct obd_connect_data *data = NULL;
struct lustre_sb_info *lsi = s2lsi(sb);
struct obd_device *obd;
struct obd_uuid *uuid;
class_uuid_t uuidc;
lnet_nid_t nid;
- char *mgcname, *niduuid;
+ char *mgcname, *niduuid, *mgssec;
char *ptr;
int recov_bk;
int rc = 0, i = 0, j, len;
GOTO(out_free, rc = -ENOMEM);
sprintf(mgcname, "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
- mutex_down(&mgc_start_lock);
+ mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
+
+ cfs_mutex_down(&mgc_start_lock);
obd = class_name2obd(mgcname);
if (obd && !obd->obd_stopping) {
+ rc = obd_set_info_async(obd->obd_self_export,
+ strlen(KEY_MGSSEC), KEY_MGSSEC,
+ strlen(mgssec), mgssec, NULL);
+ if (rc)
+ GOTO(out_free, rc);
+
/* Re-using an existing MGC */
- atomic_inc(&obd->u.cli.cl_mgc_refcount);
+ cfs_atomic_inc(&obd->u.cli.cl_mgc_refcount);
recov_bk = 0;
/* If we are restarting the MGS, don't try to keep the MGC's
GOTO(out_free, rc = -ENOTCONN);
}
+ rc = obd_set_info_async(obd->obd_self_export,
+ strlen(KEY_MGSSEC), KEY_MGSSEC,
+ strlen(mgssec), mgssec, NULL);
+ if (rc)
+ GOTO(out_free, rc);
+
/* Keep a refcount of servers/clients who started with "mount",
so we know when we can get rid of the mgc. */
- atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
+ cfs_atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
/* Try all connections, but only once. */
recov_bk = 1;
data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_FID |
OBD_CONNECT_AT;
data->ocd_version = LUSTRE_VERSION_CODE;
- rc = obd_connect(NULL, &mgc_conn, obd, &(obd->obd_uuid), data, NULL);
+ rc = obd_connect(NULL, &exp, obd, &(obd->obd_uuid), data, NULL);
OBD_FREE_PTR(data);
if (rc) {
CERROR("connect failed %d\n", rc);
GOTO(out, rc);
}
- exp = class_conn2export(&mgc_conn);
obd->u.cli.cl_mgc_mgsexp = exp;
out:
to the same mgc.*/
lsi->lsi_mgc = obd;
out_free:
- mutex_up(&mgc_start_lock);
+ cfs_mutex_up(&mgc_start_lock);
if (mgcname)
OBD_FREE(mgcname, len);
RETURN(-ENOENT);
lsi->lsi_mgc = NULL;
- mutex_down(&mgc_start_lock);
- if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
+ cfs_mutex_down(&mgc_start_lock);
+ LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
+ if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
will call in here. */
CDEBUG(D_MOUNT, "mgc still has %d references.\n",
- atomic_read(&obd->u.cli.cl_mgc_refcount));
+ cfs_atomic_read(&obd->u.cli.cl_mgc_refcount));
GOTO(out, rc = -EBUSY);
}
/* Clean the nid uuids */
if (!niduuid)
- RETURN(-ENOMEM);
+ GOTO(out, rc = -ENOMEM);
+
for (i = 0; i < lsi->lsi_lmd->lmd_mgs_failnodes; i++) {
sprintf(ptr, "_%x", i);
rc = do_lcfg(LUSTRE_MGC_OBDNAME, 0, LCFG_DEL_UUID,
CERROR("del MDC UUID %s failed: rc = %d\n",
niduuid, rc);
}
- OBD_FREE(niduuid, len);
- /* class_import_put will get rid of the additional connections */
-
out:
- mutex_up(&mgc_start_lock);
+ if (niduuid)
+ OBD_FREE(niduuid, len);
+
+ /* class_import_put will get rid of the additional connections */
+ cfs_mutex_up(&mgc_start_lock);
RETURN(rc);
}
RETURN(rc);
}
-DECLARE_MUTEX(server_start_lock);
+CFS_DECLARE_MUTEX(server_start_lock);
/* Stop MDS/OSS if nobody is using them */
static int server_stop_servers(int lddflags, int lsiflags)
int rc = 0;
ENTRY;
- mutex_down(&server_start_lock);
+ cfs_mutex_down(&server_start_lock);
/* Either an MDT or an OST or neither */
/* if this was an MDT, and there are no more MDT's, clean up the MDS */
rc = err;
}
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
RETURN(rc);
}
/* If we're an MDT, make sure the global MDS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
/* make sure the MDS is started */
- mutex_down(&server_start_lock);
+ cfs_mutex_down(&server_start_lock);
obd = class_name2obd(LUSTRE_MDS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
LUSTRE_MDS_OBDNAME"_uuid",
0, 0);
if (rc) {
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
CERROR("failed to start MDS: %d\n", rc);
RETURN(rc);
}
}
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
}
#endif
/* If we're an OST, make sure the global OSS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_OST) {
/* make sure OSS is started */
- mutex_down(&server_start_lock);
+ cfs_mutex_down(&server_start_lock);
obd = class_name2obd(LUSTRE_OSS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
LUSTRE_OSS_OBDNAME"_uuid",
0, 0);
if (rc) {
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
CERROR("failed to start OSS: %d\n", rc);
RETURN(rc);
}
}
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
}
/* Set the mgc fs to our server disk. This allows the MGC
if (rc) {
CERROR("failed to start server %s: %d\n",
lsi->lsi_ldd->ldd_svname, rc);
+ /* Do NOT call server_deregister_mount() here. This makes it
+ * impossible to find the mount later at cleanup time and leaves
+ * @lsi and other stuff leaked. -umka */
GOTO(out_mgc, rc);
}
struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
{
- struct lustre_sb_info *lsi = NULL;
+ struct lustre_sb_info *lsi;
ENTRY;
- OBD_ALLOC(lsi, sizeof(*lsi));
+ OBD_ALLOC_PTR(lsi);
if (!lsi)
RETURN(NULL);
- OBD_ALLOC(lsi->lsi_lmd, sizeof(*lsi->lsi_lmd));
+ OBD_ALLOC_PTR(lsi->lsi_lmd);
if (!lsi->lsi_lmd) {
- OBD_FREE(lsi, sizeof(*lsi));
+ OBD_FREE_PTR(lsi);
RETURN(NULL);
}
lsi->lsi_lmd->lmd_exclude_count = 0;
s2lsi_nocast(sb) = lsi;
/* we take 1 extra ref for our setup */
- atomic_set(&lsi->lsi_mounts, 1);
+ cfs_atomic_set(&lsi->lsi_mounts, 1);
/* Default umount style */
lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
/* someone didn't call server_put_mount. */
- LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
+ LASSERT(cfs_atomic_read(&lsi->lsi_mounts) == 0);
if (lsi->lsi_ldd != NULL)
OBD_FREE(lsi->lsi_ldd, sizeof(*lsi->lsi_ldd));
if (lsi->lsi_lmd->lmd_profile != NULL)
OBD_FREE(lsi->lsi_lmd->lmd_profile,
strlen(lsi->lsi_lmd->lmd_profile) + 1);
+ if (lsi->lsi_lmd->lmd_mgssec != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_mgssec,
+ strlen(lsi->lsi_lmd->lmd_mgssec) + 1);
if (lsi->lsi_lmd->lmd_opts != NULL)
OBD_FREE(lsi->lsi_lmd->lmd_opts,
strlen(lsi->lsi_lmd->lmd_opts) + 1);
LASSERT(lsi != NULL);
- CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
- if (atomic_dec_and_test(&lsi->lsi_mounts)) {
+ CDEBUG(D_MOUNT, "put %p %d\n", sb, cfs_atomic_read(&lsi->lsi_mounts));
+ if (cfs_atomic_dec_and_test(&lsi->lsi_mounts)) {
lustre_free_lsi(sb);
RETURN(1);
}
GOTO(out_free, rc);
}
+ if (lmd->lmd_flags & LMD_FLG_ABORT_RECOV)
+ simple_truncate(mnt->mnt_sb->s_root, mnt, LAST_RCVD,
+ LR_CLIENT_START);
+
OBD_PAGE_FREE(__page);
lsi->lsi_ldd = ldd; /* freed at lsi cleanup */
CDEBUG(D_SUPER, "%s: mnt = %p\n", lmd->lmd_dev, mnt);
RETURN(ERR_PTR(rc));
}
+/* Wait here forever until the mount refcount is 0 before completing umount,
+ * else we risk dereferencing a null pointer.
+ * LNET may take e.g. 165s before killing zombies.
+ */
static void server_wait_finished(struct vfsmount *mnt)
{
- wait_queue_head_t waitq;
- struct l_wait_info lwi;
- int retries = 330;
-
- init_waitqueue_head(&waitq);
-
- while ((atomic_read(&mnt->mnt_count) > 1) && (retries > 0)) {
- LCONSOLE_WARN("Mount still busy with %d refs, waiting for "
- "%d secs...\n",
- atomic_read(&mnt->mnt_count), retries);
-
- /* Wait for a bit */
- retries -= 5;
- lwi = LWI_TIMEOUT(5 * HZ, NULL, NULL);
- l_wait_event(waitq, 0, &lwi);
- }
- if (atomic_read(&mnt->mnt_count) > 1) {
- CERROR("Mount %p is still busy (%d refs), giving up.\n",
- mnt, atomic_read(&mnt->mnt_count));
- }
+ cfs_waitq_t waitq;
+ int rc, waited = 0;
+ cfs_sigset_t blocked;
+
+ cfs_waitq_init(&waitq);
+
+ while (atomic_read(&mnt->mnt_count) > 1) {
+ if (waited && (waited % 30 == 0))
+ LCONSOLE_WARN("Mount still busy with %d refs after "
+ "%d secs.\n",
+ atomic_read(&mnt->mnt_count),
+ waited);
+ /* Cannot use l_wait_event() for an interruptible sleep. */
+ waited += 3;
+ blocked = l_w_e_set_sigs(sigmask(SIGKILL));
+ cfs_waitq_wait_event_interruptible_timeout(
+ waitq,
+ (atomic_read(&mnt->mnt_count) == 1),
+ cfs_time_seconds(3),
+ rc);
+ cfs_block_sigs(blocked);
+ if (rc < 0) {
+ LCONSOLE_EMERG("Danger: interrupted umount %s with "
+ "%d refs!\n",
+ mnt->mnt_devname,
+ atomic_read(&mnt->mnt_count));
+ break;
+ }
+
+ }
}
static void server_put_super(struct super_block *sb)
int tmpname_sz;
int lddflags = lsi->lsi_ldd->ldd_flags;
int lsiflags = lsi->lsi_flags;
- int rc;
ENTRY;
LASSERT(lsiflags & LSI_SERVER);
OBD_ALLOC(tmpname, tmpname_sz);
memcpy(tmpname, lsi->lsi_ldd->ldd_svname, tmpname_sz);
CDEBUG(D_MOUNT, "server put_super %s\n", tmpname);
+ if (IS_MDT(lsi->lsi_ldd) && (lsi->lsi_lmd->lmd_flags & LMD_FLG_NOSVC))
+ snprintf(tmpname, tmpname_sz, "MGS");
/* Stop the target */
if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOSVC) &&
/* If they wanted the mgs to stop separately from the mdt, they
should have put it on a different device. */
if (IS_MGS(lsi->lsi_ldd)) {
- /* stop the mgc before the mgs so the connection gets cleaned
- up */
- lustre_stop_mgc(sb);
/* if MDS start with --nomgs, don't stop MGS then */
if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOMGS))
server_stop_mgs(sb);
}
/* Clean the mgc and sb */
- rc = lustre_common_put_super(sb);
- /* FIXME how can I report a failure to umount? */
+ lustre_common_put_super(sb);
/* Wait for the targets to really clean up - can't exit (and let the
sb get destroyed) while the mount is still in use */
}
#ifndef HAVE_STATFS_DENTRY_PARAM
-static int server_statfs (struct super_block *sb, struct kstatfs *buf)
+static int server_statfs (struct super_block *sb, cfs_kstatfs_t *buf)
{
#else
-static int server_statfs (struct dentry *dentry, struct kstatfs *buf)
+static int server_statfs (struct dentry *dentry, cfs_kstatfs_t *buf)
{
struct super_block *sb = dentry->d_sb;
#endif
.statfs = server_statfs,
};
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
#define LUSTRE_SUPER_MAGIC 0x0BD00BD1
static int server_fill_super_common(struct super_block *sb)
}
/* Start MGS before MGC */
- if (IS_MGS(lsi->lsi_ldd) && !(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOMGS)) {
+ if (IS_MGS(lsi->lsi_ldd) && !(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOMGS)){
rc = server_start_mgs(sb);
if (rc)
GOTO(out_mnt, rc);
GOTO(out_mnt, rc);
LCONSOLE_WARN("Server %s on device %s has started\n",
- lsi->lsi_ldd->ldd_svname, lsi->lsi_lmd->lmd_dev);
+ ((lsi->lsi_lmd->lmd_flags & LMD_FLG_NOSVC) &&
+ (IS_MDT(lsi->lsi_ldd))) ? "MGS" : lsi->lsi_ldd->ldd_svname,
+ lsi->lsi_lmd->lmd_dev);
RETURN(0);
out_mnt:
{
unsigned long index;
int rc;
- char *dash = strchr(svname, '-');
+ char *dash = strrchr(svname, '-');
if (!dash)
return(-EINVAL);
+ /* interpret <fsname>-MDTXXXXX-mdc as mdt, the better way is to pass
+ * in the fsname, then determine the server index */
+ if (!strcmp(LUSTRE_MDC_NAME, dash + 1)) {
+ dash--;
+ for (; dash > svname && *dash != '-'; dash--);
+ if (dash == svname)
+ return(-EINVAL);
+ }
+
if (strncmp(dash + 1, "MDT", 3) == 0)
rc = LDD_F_SV_TYPE_MDT;
else if (strncmp(dash + 1, "OST", 3) == 0)
rc = LDD_F_SV_TYPE_OST;
else
return(-EINVAL);
+ if (strcmp(dash + 4, "all") == 0)
+ return rc | LDD_F_SV_ALL;
index = simple_strtoul(dash + 4, endptr, 16);
*idx = index;
}
/* Drop a ref to the mounted disk */
lustre_put_lsi(sb);
+ lu_types_stop();
RETURN(rc);
}
RETURN(rc);
}
+/* Parse the value portion of a "mgssec=<flavor>" mount option.
+ *
+ * \param lmd  mount data to update; any previously stored
+ *             lmd_mgssec string is freed and replaced
+ * \param ptr  start of the flavor value; the value ends at the
+ *             next ',' (more options follow) or at the NUL
+ *
+ * \retval 0        on success, lmd->lmd_mgssec set to a
+ *                  NUL-terminated copy of the value
+ * \retval -ENOMEM  if the copy cannot be allocated
+ */
+static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
+{
+ char *tail;
+ int length;
+
+ /* Last occurrence of the option wins: drop any earlier value. */
+ if (lmd->lmd_mgssec != NULL) {
+ OBD_FREE(lmd->lmd_mgssec, strlen(lmd->lmd_mgssec) + 1);
+ lmd->lmd_mgssec = NULL;
+ }
+
+ /* The value runs up to the next comma-separated option, if any. */
+ tail = strchr(ptr, ',');
+ if (tail == NULL)
+ length = strlen(ptr);
+ else
+ length = tail - ptr;
+
+ OBD_ALLOC(lmd->lmd_mgssec, length + 1);
+ if (lmd->lmd_mgssec == NULL)
+ return -ENOMEM;
+
+ memcpy(lmd->lmd_mgssec, ptr, length);
+ lmd->lmd_mgssec[length] = '\0';
+ return 0;
+}
+
+
/* mount -v -t lustre uml1:uml2:/lustre-client /mnt/lustre */
static int lmd_parse(char *options, struct lustre_mount_data *lmd)
{
} else if (strncmp(s1, "nomgs", 5) == 0) {
lmd->lmd_flags |= LMD_FLG_NOMGS;
clear++;
+ } else if (strncmp(s1, "mgssec=", 7) == 0) {
+ rc = lmd_parse_mgssec(lmd, s1 + 7);
+ if (rc)
+ goto invalid;
+ clear++;
/* ost exclusion list */
} else if (strncmp(s1, "exclude=", 8) == 0) {
rc = lmd_make_exclusion(lmd, s1 + 7);
RETURN(-ENOMEM);
lmd = lsi->lsi_lmd;
+ /*
+ * Disable lockdep during mount, because mount locking patterns are
+ * `special'.
+ */
+ cfs_lockdep_off();
+
/* Figure out the lmd from the mount options */
if (lmd_parse((char *)data, lmd)) {
lustre_put_lsi(sb);
CERROR("Unable to mount %s (%d)\n",
s2lsi(sb) ? lmd->lmd_dev : "", rc);
} else {
- CDEBUG(D_SUPER, "Mount %s complete\n",
+ CDEBUG(D_SUPER, "Mount %s complete\n",
lmd->lmd_dev);
}
+ cfs_lockdep_on();
return rc;
}
.get_sb = lustre_get_sb,
.kill_sb = lustre_kill_super,
.fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
+#ifdef FS_HAS_FIEMAP
+ FS_HAS_FIEMAP |
+#endif
LL_RENAME_DOES_D_MOVE,
};