return container_of(inode, struct ll_inode_info, lli_vfs_inode);
}
-/* default to about 40meg of readahead on a given system. That much tied
- * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+/* default to about 64M of readahead on a given system. */
+#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_CACHE_SHIFT))
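+/* Note: (20 - PAGE_CACHE_SHIFT) converts MiB to a page count, e.g. with
+ * 4K pages (PAGE_CACHE_SHIFT == 12) this is 64UL << 8 == 16384 pages. */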
/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
enum ra_stat {
RA_STAT_HIT = 0,
* counted by page index.
*/
struct ra_io_arg {
- unsigned long ria_start; /* start offset of read-ahead*/
- unsigned long ria_end; /* end offset of read-ahead*/
- /* If stride read pattern is detected, ria_stoff means where
- * stride read is started. Note: for normal read-ahead, the
- * value here is meaningless, and also it will not be accessed*/
- pgoff_t ria_stoff;
- /* ria_length and ria_pages are the length and pages length in the
- * stride I/O mode. And they will also be used to check whether
- * it is stride I/O read-ahead in the read-ahead pages*/
- unsigned long ria_length;
- unsigned long ria_pages;
+ unsigned long ria_start; /* start offset of read-ahead */
+ unsigned long ria_end; /* end offset of read-ahead */
+ unsigned long ria_reserved; /* reserved pages for read-ahead */
+ unsigned long ria_end_min; /* minimum end to cover current read */
+ bool ria_eof; /* reach end of file */
+ /* If a stride read pattern is detected, ria_stoff is the offset at
+ * which the stride read starts. Note: for normal read-ahead the value
+ * is meaningless and will not be accessed. */
+ pgoff_t ria_stoff;
+ /* In stride I/O mode, ria_length is the stride step and ria_pages is
+ * the number of pages to read within each stride; they are also used
+ * to check whether the read-ahead pages follow a stride I/O pattern. */
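+ /* For example (assuming 4K pages): an application that reads 512K and
+ * then skips 1536K in a loop would have ria_length == 512 (the 2M
+ * stride step) and ria_pages == 128 (512K of data per stride). */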
+ unsigned long ria_length;
+ unsigned long ria_pages;
};
/* LL_HIST_MAX=32 causes an overflow */
#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
+#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if the server
+ * sets suppress_pings */
#define LL_SBI_FLAGS { \
"nolck", \
"user_fid2path",\
"xattr_cache", \
"norootsquash", \
+ "always_ping", \
}
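+/* Note: the LL_SBI_FLAGS strings are indexed by flag bit number, so
+ * "always_ping" must stay at the position matching LL_SBI_ALWAYS_PING. */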
#define RCE_HASHES 32
* PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
*/
unsigned long ras_window_start, ras_window_len;
+ /*
+ * Optimal RPC size in pages; it determines how many pages are
+ * requested in each read-ahead RPC.
+ */
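+ /* e.g. assuming 4K pages and a 1M RPC size, ras_rpc_size == 256, so
+ * the read-ahead window is extended 256 pages at a time. */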
+ unsigned long ras_rpc_size;
/*
* Where next read-ahead should start at. This lies within read-ahead
* window. Read-ahead window is read in pieces rather than at once
void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
void ll_cl_remove(struct file *file, const struct lu_env *env);
-#ifndef MS_HAS_NEW_AOPS
extern const struct address_space_operations ll_aops;
-#else
-extern const struct address_space_operations_ext ll_aops;
-#endif
/* llite/file.c */
extern struct file_operations ll_file_operations;
int cl_sb_init(struct super_block *sb);
int cl_sb_fini(struct super_block *sb);
+enum ras_update_flags {
+ LL_RAS_HIT = 0x1, /* the page was found in the cache (read-ahead hit) */
+ LL_RAS_MMAP = 0x2 /* the read was triggered by an mmap page fault */
+};
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
struct lookup_intent *it, __u64 *bits)
{
- if (!it->d.lustre.it_lock_set) {
+ if (!it->it_lock_set) {
struct lustre_handle handle;
/* If this inode is a remote object, it will get two
* UPDATE|PERM locks. The inode will be attached to both
* LOOKUP and PERM locks, so revoking either lock will
* cause the dcache to be cleared. */
- if (it->d.lustre.it_remote_lock_mode) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
+ if (it->it_remote_lock_mode) {
+ handle.cookie = it->it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID
"(%p) for remote lock "LPX64"\n",
PFID(ll_inode2fid(inode)), inode,
md_set_lock_data(exp, &handle.cookie, inode, NULL);
}
- handle.cookie = it->d.lustre.it_lock_handle;
+ handle.cookie = it->it_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)"
" for lock "LPX64"\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
- &it->d.lustre.it_lock_bits);
- it->d.lustre.it_lock_set = 1;
+ &it->it_lock_bits);
+ it->it_lock_set = 1;
}
if (bits != NULL)
- *bits = it->d.lustre.it_lock_bits;
+ *bits = it->it_lock_bits;
}
static inline void ll_lock_dcache(struct inode *inode)