-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/*********** mount lookup *********/
-CFS_DECLARE_MUTEX(lustre_mount_info_lock);
+CFS_DEFINE_MUTEX(lustre_mount_info_lock);
static CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
}
strcpy(name_cp, name);
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
if (server_find_mount(name)) {
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
OBD_FREE(lmi, sizeof(*lmi));
OBD_FREE(name_cp, strlen(name) + 1);
CERROR("Already registered %s\n", name);
lmi->lmi_mnt = mnt;
cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
RETURN(0);
}
struct lustre_mount_info *lmi;
ENTRY;
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
if (!lmi) {
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
CERROR("%s not registered\n", name);
RETURN(-ENOENT);
}
CDEBUG(D_MOUNT, "dereg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
cfs_list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
RETURN(0);
}
struct lustre_sb_info *lsi;
ENTRY;
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(NULL);
CDEBUG(D_MOUNT, "get_mnt %p from %s, refs=%d, vfscount=%d\n",
lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts),
- cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
+ mnt_get_count(lmi->lmi_mnt));
RETURN(lmi);
}
struct lustre_mount_info *lmi;
ENTRY;
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi)
CERROR("Can't find mount for %s\n", name);
static void unlock_mntput(struct vfsmount *mnt)
{
- if (kernel_locked()) {
- cfs_unlock_kernel();
- mntput(mnt);
- cfs_lock_kernel();
- } else {
- mntput(mnt);
- }
+#ifdef HAVE_KERNEL_LOCKED
+ /* kernel_locked()/lock_kernel() exist only before the BKL removal in 2.6.37 */
+ if (kernel_locked()) {
+ unlock_kernel();
+ mntput(mnt);
+ lock_kernel();
+ } else {
+ mntput(mnt);
+ }
+#else
+ mntput(mnt);
+#endif
}
static int lustre_put_lsi(struct super_block *sb);
{
struct lustre_mount_info *lmi;
struct lustre_sb_info *lsi;
- int count = atomic_read(&mnt->mnt_count) - 1;
+ int count = mnt_get_count(mnt) - 1;
ENTRY;
/* This might be the last one, can't deref after this */
unlock_mntput(mnt);
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(-ENOENT);
RETURN(rc);
}
-CFS_DECLARE_MUTEX(mgc_start_lock);
+CFS_DEFINE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
RETURN(-EINVAL);
}
- cfs_mutex_down(&mgc_start_lock);
+ cfs_mutex_lock(&mgc_start_lock);
len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
OBD_ALLOC(mgcname, len);
obd = class_name2obd(mgcname);
if (obd && !obd->obd_stopping) {
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
strlen(KEY_MGSSEC), KEY_MGSSEC,
strlen(mgssec), mgssec, NULL);
if (rc)
int vallen = sizeof(*data);
__u32 *flags = &lsi->lsi_lmd->lmd_flags;
- rc = obd_get_info(obd->obd_self_export,
+ rc = obd_get_info(NULL, obd->obd_self_export,
strlen(KEY_CONN_DATA), KEY_CONN_DATA,
&vallen, data, NULL);
LASSERT(rc == 0);
if at all possible. */
recov_bk++;
CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,recov_bk);
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
sizeof(KEY_INIT_RECOV_BACKUP),
KEY_INIT_RECOV_BACKUP,
sizeof(recov_bk), &recov_bk, NULL);
GOTO(out_free, rc = -ENOTCONN);
}
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
strlen(KEY_MGSSEC), KEY_MGSSEC,
strlen(mgssec), mgssec, NULL);
if (rc)
/* Try all connections, but only once. */
recov_bk = 1;
- rc = obd_set_info_async(obd->obd_self_export,
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
sizeof(KEY_INIT_RECOV_BACKUP),
KEY_INIT_RECOV_BACKUP,
sizeof(recov_bk), &recov_bk, NULL);
to the same mgc.*/
lsi->lsi_mgc = obd;
out_free:
- cfs_mutex_up(&mgc_start_lock);
+ cfs_mutex_unlock(&mgc_start_lock);
if (data)
OBD_FREE_PTR(data);
RETURN(-ENOENT);
lsi->lsi_mgc = NULL;
- cfs_mutex_down(&mgc_start_lock);
+ cfs_mutex_lock(&mgc_start_lock);
LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
OBD_FREE(niduuid, len);
/* class_import_put will get rid of the additional connections */
- cfs_mutex_up(&mgc_start_lock);
+ cfs_mutex_unlock(&mgc_start_lock);
RETURN(rc);
}
CDEBUG(D_MOUNT, "Set mgc disk for %s\n", lsi->lsi_lmd->lmd_dev);
/* cl_mgc_sem in mgc insures we sleep if the mgc_fs is busy */
- rc = obd_set_info_async(mgc->obd_self_export,
+ rc = obd_set_info_async(NULL, mgc->obd_self_export,
sizeof(KEY_SET_FS), KEY_SET_FS,
sizeof(*sb), sb, NULL);
if (rc) {
CDEBUG(D_MOUNT, "Unassign mgc disk\n");
- rc = obd_set_info_async(mgc->obd_self_export,
+ rc = obd_set_info_async(NULL, mgc->obd_self_export,
sizeof(KEY_CLEAR_FS), KEY_CLEAR_FS,
0, NULL, NULL);
RETURN(rc);
}
-CFS_DECLARE_MUTEX(server_start_lock);
+CFS_DEFINE_MUTEX(server_start_lock);
/* Stop MDS/OSS if nobody is using them */
static int server_stop_servers(int lddflags, int lsiflags)
int rc = 0;
ENTRY;
- cfs_mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
/* Either an MDT or an OST or neither */
/* if this was an MDT, and there are no more MDT's, clean up the MDS */
rc = err;
}
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
RETURN(rc);
}
/* Register the target */
/* FIXME use mgc_process_config instead */
- rc = obd_set_info_async(mgc->u.cli.cl_mgc_mgsexp,
+ rc = obd_set_info_async(NULL, mgc->u.cli.cl_mgc_mgsexp,
sizeof(KEY_REGISTER_TARGET), KEY_REGISTER_TARGET,
sizeof(*mti), mti, NULL);
if (rc) {
mti->mti_flags |= LDD_F_OPC_READY;
/* FIXME use mgc_process_config instead */
- rc = obd_set_info_async(mgc->u.cli.cl_mgc_mgsexp,
+ rc = obd_set_info_async(NULL, mgc->u.cli.cl_mgc_mgsexp,
sizeof(KEY_REGISTER_TARGET),
KEY_REGISTER_TARGET,
sizeof(*mti), mti, NULL);
/* If we're an MDT, make sure the global MDS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
/* make sure the MDS is started */
- cfs_mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_MDS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
LUSTRE_MDS_OBDNAME"_uuid",
0, 0);
if (rc) {
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
CERROR("failed to start MDS: %d\n", rc);
RETURN(rc);
}
}
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
}
#endif
/* If we're an OST, make sure the global OSS is running */
if (IS_OST(lsi->lsi_ldd)) {
/* make sure OSS is started */
- cfs_mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_OSS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
LUSTRE_OSS_OBDNAME"_uuid",
0, 0);
if (rc) {
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
CERROR("failed to start OSS: %d\n", rc);
RETURN(rc);
}
}
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
}
/* Set the mgc fs to our server disk. This allows the MGC to
struct lustre_disk_data *ldd;
struct lustre_mount_data *lmd = lsi->lsi_lmd;
struct vfsmount *mnt;
+ struct file_system_type *type;
char *options = NULL;
unsigned long page, s_flags;
struct page *__page;
+ int len;
int rc;
ENTRY;
/* Pre-mount ldiskfs to read the MOUNT_DATA_FILE */
CDEBUG(D_MOUNT, "Pre-mount ldiskfs %s\n", lmd->lmd_dev);
- mnt = ll_kern_mount("ldiskfs", s_flags, lmd->lmd_dev, (void *)options);
+ type = get_fs_type("ldiskfs");
+ if (!type) {
+ CERROR("premount failed: cannot find ldiskfs module\n");
+ GOTO(out_free, rc = -ENODEV);
+ }
+ mnt = vfs_kern_mount(type, s_flags, lmd->lmd_dev, (void *)options);
+ cfs_module_put(type->owner);
if (IS_ERR(mnt)) {
rc = PTR_ERR(mnt);
CERROR("premount %s:%#lx ldiskfs failed: %d "
memset(options, 0, CFS_PAGE_SIZE);
strncpy(options, ldd->ldd_mount_opts, CFS_PAGE_SIZE - 2);
+ len = CFS_PAGE_SIZE - strlen(options) - 2;
+ if (*options != 0)
+ strcat(options, ",");
+ strncat(options, "no_mbcache", len);
+
/* Add in any mount-line options */
if (lmd->lmd_opts && (*(lmd->lmd_opts) != 0)) {
- int len = CFS_PAGE_SIZE - strlen(options) - 2;
- if (*options != 0)
- strcat(options, ",");
+ len = CFS_PAGE_SIZE - strlen(options) - 2;
+ strcat(options, ",");
strncat(options, lmd->lmd_opts, len);
}
CDEBUG(D_MOUNT, "kern_mount: %s %s %s\n",
MT_STR(ldd), lmd->lmd_dev, options);
- mnt = ll_kern_mount(MT_STR(ldd), s_flags, lmd->lmd_dev,
- (void *)options);
+ type = get_fs_type(MT_STR(ldd));
+ if (!type) {
+ CERROR("get_fs_type failed\n");
+ GOTO(out_free, rc = -ENODEV);
+ }
+ mnt = vfs_kern_mount(type, s_flags, lmd->lmd_dev, (void *)options);
+ cfs_module_put(type->owner);
if (IS_ERR(mnt)) {
rc = PTR_ERR(mnt);
- CERROR("ll_kern_mount failed: rc = %d\n", rc);
+ CERROR("vfs_kern_mount failed: rc = %d\n", rc);
GOTO(out_free, rc);
}
cfs_waitq_init(&waitq);
- while (atomic_read(&mnt->mnt_count) > 1) {
+ while (mnt_get_count(mnt) > 1) {
if (waited && (waited % 30 == 0))
LCONSOLE_WARN("Mount still busy with %d refs after "
"%d secs.\n",
- atomic_read(&mnt->mnt_count),
+ mnt_get_count(mnt),
waited);
/* Cannot use l_event_wait() for an interruptible sleep. */
waited += 3;
blocked = cfs_block_sigsinv(sigmask(SIGKILL));
cfs_waitq_wait_event_interruptible_timeout(
waitq,
- (atomic_read(&mnt->mnt_count) == 1),
+ (mnt_get_count(mnt) == 1),
cfs_time_seconds(3),
rc);
- cfs_block_sigs(blocked);
+ cfs_restore_sigs(blocked);
if (rc < 0) {
LCONSOLE_EMERG("Danger: interrupted umount %s with "
"%d refs!\n", mnt->mnt_devname,
- atomic_read(&mnt->mnt_count));
+ mnt_get_count(mnt));
break;
}
sb->s_blocksize = 4096;
sb->s_blocksize_bits = log2(sb->s_blocksize);
sb->s_magic = LUSTRE_SUPER_MAGIC;
- sb->s_maxbytes = 0; //PAGE_CACHE_MAXBYTES;
+ sb->s_maxbytes = 0; /* we don't allow file IO on server mountpoints */
sb->s_flags |= MS_RDONLY;
sb->s_op = &server_ops;
} else if (strncmp(s1, "nomgs", 5) == 0) {
lmd->lmd_flags |= LMD_FLG_NOMGS;
clear++;
+ } else if (strncmp(s1, "noscrub", 7) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOSCRUB;
+ clear++;
} else if (strncmp(s1, "writeconf", 9) == 0) {
lmd->lmd_flags |= LMD_FLG_WRITECONF;
clear++;
*/
cfs_lockdep_off();
+ /*
+ * LU-639: the obd cleanup of the last mount may not have finished yet; wait for it here.
+ */
+ obd_zombie_barrier();
+
/* Figure out the lmd from the mount options */
if (lmd_parse((char *)(lmd2->lmd2_data), lmd)) {
lustre_put_lsi(sb);