ll_generate_random_uuid(uuid);
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
- CDEBUG(D_HA, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
+ CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
spin_lock(&ll_sb_lock);
list_add_tail(&sbi->ll_list, &ll_super_blocks);
sbi->ll_flags |= LL_SBI_CHECKSUM;
#endif
+#ifdef HAVE_LRU_RESIZE_SUPPORT
+ sbi->ll_flags |= LL_SBI_LRU_RESIZE;
+#endif
+
#ifdef HAVE_EXPORT___IGET
INIT_LIST_HEAD(&sbi->ll_deathrow);
spin_lock_init(&sbi->ll_deathrow_lock);
}
static struct dentry_operations ll_d_root_ops = {
-#ifdef LUSTRE_KERNEL_VERSION
+#ifdef DCACHE_LUSTRE_INVALID
.d_compare = ll_dcompare,
#endif
};
OBD_CONNECT_ATTRFID | OBD_CONNECT_VERSION |
OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
OBD_CONNECT_CANCELSET;
+#ifdef HAVE_LRU_RESIZE_SUPPORT
+ if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
+ data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+#endif
#ifdef CONFIG_FS_POSIX_ACL
data->ocd_connect_flags |= OBD_CONNECT_ACL;
#endif
sbi->ll_flags |= LL_SBI_OSS_CAPA;
}
+ sbi->ll_sdev_orig = sb->s_dev;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
/* We set sb->s_dev equal on all lustre clients in order to support
* NFS export clustering. NFSD requires that the FSID be the same
* on all clients. */
/* s_dev is also used in lt_compare() to compare two fs, but that is
* only a node-local comparison. */
-
+
/* XXX: this will not work with LMV */
sb->s_dev = get_uuid2int(sbi2mdc(sbi)->cl_target_uuid.uuid,
strlen(sbi2mdc(sbi)->cl_target_uuid.uuid));
if (sbi->ll_flags & LL_SBI_OSS_CAPA)
data->ocd_connect_flags |= OBD_CONNECT_OSS_CAPA;
+#ifdef HAVE_LRU_RESIZE_SUPPORT
+ data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+#endif
CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
"ocd_grant: %d\n", data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
ll_async_page_slab_size =
size_round(sizeof(struct ll_async_page)) + err;
ll_async_page_slab = cfs_mem_cache_create("ll_async_page",
- ll_async_page_slab_size,
- 0, 0);
+ ll_async_page_slab_size,
+ 0, 0);
if (!ll_async_page_slab)
GOTO(out_dt_fid, err = -ENOMEM);
}
sbi->ll_root_fid = rootfid;
sb->s_op = &lustre_super_operations;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
sb->s_export_op = &lustre_export_operations;
-#endif
/* make root inode
* XXX: move this to after cbd setup? */
err = obd_set_info_async(sbi->ll_dt_exp, strlen("checksum"),"checksum",
sizeof(checksum), &checksum, NULL);
- /* making vm readahead 0 for 2.4.x. In the case of 2.6.x,
- backing dev info assigned to inode mapping is used for
- determining maximal readahead. */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
- !defined(KERNEL_HAS_AS_MAX_READAHEAD)
- /* bug 2805 - set VM readahead to zero */
- vm_max_readahead = vm_min_readahead = 0;
-#endif
-
sb->s_root = d_alloc_root(root);
if (data != NULL)
OBD_FREE(data, sizeof(*data));
}
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-void lustre_throw_orphan_dentries(struct super_block *sb)
-{
- struct dentry *dentry, *next;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- /* Do this to get rid of orphaned dentries. That is not really trw. */
- list_for_each_entry_safe(dentry, next, &sbi->ll_orphan_dentry_list,
- d_hash) {
- CWARN("found orphan dentry %.*s (%p->%p) at unmount, dumping "
- "before and after shrink_dcache_parent\n",
- dentry->d_name.len, dentry->d_name.name, dentry, next);
- lustre_dump_dentry(dentry, 1);
- shrink_dcache_parent(dentry);
- lustre_dump_dentry(dentry, 1);
- }
-}
-#else
-#define lustre_throw_orphan_dentries(sb)
-#endif
-
#ifdef HAVE_EXPORT___IGET
static void prune_dir_dentries(struct inode *inode)
{
prune_deathrow(sbi, 0);
list_del(&sbi->ll_conn_chain);
-
+
obd_fid_fini(sbi->ll_dt_exp);
obd_disconnect(sbi->ll_dt_exp);
sbi->ll_dt_exp = NULL;
obd_disconnect(sbi->ll_md_exp);
sbi->ll_md_exp = NULL;
- lustre_throw_orphan_dentries(sb);
+ EXIT;
+}
+
+void ll_kill_super(struct super_block *sb)
+{
+ struct ll_sb_info *sbi;
+
+ ENTRY;
+
+ /* superblock was never fully initialized? */
+ if (!(sb->s_flags & MS_ACTIVE))
+ return;
+
+ sbi = ll_s2sbi(sb);
+ /* we need to restore s_dev, which was changed for clustered NFS,
+ * before put_super, because new kernels cache s_dev and changing
+ * sb->s_dev in put_super does not affect the real device removal */
+ if (sbi)
+ sb->s_dev = sbi->ll_sdev_orig;
EXIT;
}
*flags &= ~tmp;
goto next;
}
+ tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
s1);
UP_WRITE_I_ALLOC_SEM(inode);
rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy, &lockh,
ast_flags);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- DOWN_WRITE_I_ALLOC_SEM(inode);
- LOCK_INODE_MUTEX(inode);
-#else
LOCK_INODE_MUTEX(inode);
DOWN_WRITE_I_ALLOC_SEM(inode);
-#endif
+
if (rc != 0)
GOTO(out, rc);
if (body->valid & OBD_MD_FLNLINK)
inode->i_nlink = body->nlink;
if (body->valid & OBD_MD_FLRDEV)
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- inode->i_rdev = body->rdev;
-#else
inode->i_rdev = old_decode_dev(body->rdev);
-#endif
- if (body->valid & OBD_MD_FLSIZE) {
- if (ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) {
- if (lli->lli_flags & (LLIF_DONE_WRITING |
- LLIF_EPOCH_PENDING |
- LLIF_SOM_DIRTY))
- CWARN("ino %lu flags %lu still has size authority!"
- "do not trust the size got from MDS\n",
- inode->i_ino, lli->lli_flags);
- else {
- i_size_write(inode, body->size);
- lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
- }
- } else {
- i_size_write(inode, body->size);
- }
-
- if (body->valid & OBD_MD_FLBLOCKS)
- inode->i_blocks = body->blocks;
- }
if (body->valid & OBD_MD_FLID) {
/* FID shouldn't be changed! */
LASSERT(fid_seq(&lli->lli_fid) != 0);
+ if (body->valid & OBD_MD_FLSIZE) {
+ if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
+ S_ISREG(inode->i_mode) && lli->lli_smd) {
+ struct lustre_handle lockh;
+ ldlm_mode_t mode;
+
+ /* As it is possible a blocking AST has been processed
+ * by this time, we need to check that there is an UPDATE
+ * lock on the client and set LLIF_MDS_SIZE_LOCK while
+ * holding it. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
+ &lockh);
+ if (mode) {
+ if (lli->lli_flags & (LLIF_DONE_WRITING |
+ LLIF_EPOCH_PENDING |
+ LLIF_SOM_DIRTY)) {
+ CERROR("ino %lu flags %lu still has "
+ "size authority! do not trust "
+ "the size got from MDS\n",
+ inode->i_ino, lli->lli_flags);
+ } else {
+ i_size_write(inode, body->size);
+ lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
+ }
+ ldlm_lock_decref(&lockh, mode);
+ }
+ } else {
+ i_size_write(inode, body->size);
+ }
+
+ if (body->valid & OBD_MD_FLBLOCKS)
+ inode->i_blocks = body->blocks;
+ }
+
if (body->valid & OBD_MD_FLMDSCAPA) {
LASSERT(md->mds_capa);
ll_add_capa(inode, md->mds_capa);
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
static struct backing_dev_info ll_backing_dev_info = {
.ra_pages = 0, /* No readahead */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
.memory_backed = 0, /* Does contribute to dirty memory */
#endif
};
-#endif
void ll_read_inode2(struct inode *inode, void *opaque)
{
} else {
inode->i_op = &ll_special_inode_operations;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
init_special_inode(inode, inode->i_mode,
kdev_t_to_nr(inode->i_rdev));
/* initializing backing dev info. */
inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
-#else
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
-#endif
+
EXIT;
}
}
body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(*body));
-
- /*Now the ext3 will be packed directly back to client,
- *no need convert here*/
flags = body->flags;
ptlrpc_req_finished (req);