s64 lli_ctime;
spinlock_t lli_agl_lock;
- /* update atime from MDS no matter if it's older than
- * local inode atime. */
- unsigned int lli_update_atime:1;
-
/* Try to make the d::member and f::member are aligned. Before using
* these members, make clear whether it is directory or not. */
union {
/* for directory */
struct {
- /* serialize normal readdir and statahead-readdir. */
- struct mutex lli_readdir_mutex;
-
/* metadata statahead */
/* since parent-child threads can share the same @file
* struct, "opendir_key" is the token when dir close for
struct mutex lli_pcc_lock;
enum lu_pcc_state_flags lli_pcc_state;
+ /*
+ * @lli_pcc_generation saves the global PCC generation
+ * when the file was successfully attached into PCC.
+ * The flags of the PCC dataset are saved in
+ * @lli_pcc_dsflags.
+ * The global PCC generation is increased when a PCC
+ * backend is added or deleted, or when the PCC
+ * configuration parameters are changed.
+ * If @lli_pcc_generation is the same as the global PCC
+ * generation, we can safely use the saved flags of the
+ * PCC dataset to determine whether an auto attach
+ * should be attempted.
+ */
+ __u64 lli_pcc_generation;
+ enum pcc_dataset_flags lli_pcc_dsflags;
struct pcc_inode *lli_pcc_inode;
+
+ struct mutex lli_group_mutex;
+ __u64 lli_group_users;
+ unsigned long lli_group_gid;
};
};
LLIF_XATTR_CACHE = 2,
/* Project inherit */
LLIF_PROJECT_INHERIT = 3,
+ /* update atime from MDS even if it's older than local inode atime. */
+ LLIF_UPDATE_ATIME = 4,
+
};
static inline void ll_file_set_flag(struct ll_inode_info *lli,
* counted by page index.
*/
struct ra_io_arg {
- unsigned long ria_start; /* start offset of read-ahead*/
- unsigned long ria_end; /* end offset of read-ahead*/
+ pgoff_t ria_start; /* start offset of read-ahead*/
+ pgoff_t ria_end; /* end offset of read-ahead*/
unsigned long ria_reserved; /* reserved pages for read-ahead */
- unsigned long ria_end_min; /* minimum end to cover current read */
- bool ria_eof; /* reach end of file */
+ pgoff_t ria_end_min; /* minimum end to cover current read */
+ bool ria_eof; /* reach end of file */
/* If stride read pattern is detected, ria_stoff means where
* stride read is started. Note: for normal read-ahead, the
* value here is meaningless, and also it will not be accessed*/
- pgoff_t ria_stoff;
- /* ria_length and ria_pages are the length and pages length in the
+ unsigned long ria_stoff;
+ /* ria_length and ria_bytes are the length and bytes length in the
* stride I/O mode. And they will also be used to check whether
* it is stride I/O read-ahead in the read-ahead pages*/
unsigned long ria_length;
- unsigned long ria_pages;
+ unsigned long ria_bytes;
};
/* LL_HIST_MAX=32 causes an overflow */
struct lu_fid ll_root_fid; /* root object fid */
int ll_flags;
- unsigned int ll_umounting:1,
- ll_xattr_cache_enabled:1,
+ unsigned int ll_xattr_cache_enabled:1,
ll_xattr_cache_set:1, /* already set to 0/1 */
ll_client_common_fill_super_succeeded:1,
ll_checksum_set:1;
*/
struct ll_readahead_state {
spinlock_t ras_lock;
+ /* End byte that read(2) tries to read. */
+ unsigned long ras_last_read_end;
/*
- * index of the last page that read(2) needed and that wasn't in the
- * cache. Used by ras_update() to detect seeks.
- *
- * XXX nikita: if access seeks into cached region, Lustre doesn't see
- * this.
- */
- unsigned long ras_last_readpage;
- /*
- * number of pages read after last read-ahead window reset. As window
+ * number of bytes read after last read-ahead window reset. As window
* is reset on each seek, this is effectively a number of consecutive
* accesses. Maybe ->ras_accessed_in_window is better name.
*
* case, it probably doesn't make sense to expand window to
* PTLRPC_MAX_BRW_PAGES on the third access.
*/
- unsigned long ras_consecutive_pages;
+ unsigned long ras_consecutive_bytes;
/*
* number of read requests after the last read-ahead window reset
* As window is reset on each seek, this is effectively the number
* on consecutive read request and is used to trigger read-ahead.
*/
- unsigned long ras_consecutive_requests;
+ unsigned long ras_consecutive_requests;
/*
* Parameters of current read-ahead window. Handled by
* ras_update(). On the initial access to the file or after a seek,
* expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
* PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
*/
- unsigned long ras_window_start, ras_window_len;
+ pgoff_t ras_window_start, ras_window_len;
/*
* Optimal RPC size. It decides how many pages will be sent
* for each read-ahead.
*/
- unsigned long ras_rpc_size;
+ unsigned long ras_rpc_size;
/*
* Where next read-ahead should start at. This lies within read-ahead
* window. Read-ahead window is read in pieces rather than at once
* ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
* not covered by DLM lock.
*/
- unsigned long ras_next_readahead;
+ pgoff_t ras_next_readahead;
/*
* Total number of ll_file_read requests issued, reads originating
* due to mmap are not counted in this total. This value is used to
* trigger full file read-ahead after multiple reads to a small file.
*/
- unsigned long ras_requests;
- /*
- * Page index with respect to the current request, these value
- * will not be accurate when dealing with reads issued via mmap.
- */
- unsigned long ras_request_index;
+ unsigned long ras_requests;
/*
* The following 3 items are used for detecting the stride I/O
* mode.
* In stride I/O mode,
* ...............|-----data-----|****gap*****|--------|******|....
- * offset |-stride_pages-|-stride_gap-|
+ * offset |-stride_bytes-|-stride_gap-|
* ras_stride_offset = offset;
- * ras_stride_length = stride_pages + stride_gap;
- * ras_stride_pages = stride_pages;
- * Note: all these three items are counted by pages.
- */
- unsigned long ras_stride_length;
- unsigned long ras_stride_pages;
- pgoff_t ras_stride_offset;
+ * ras_stride_length = stride_bytes + stride_gap;
+ * ras_stride_bytes = stride_bytes;
+ * Note: all these three items are counted in bytes.
+ */
+ unsigned long ras_stride_length;
+ unsigned long ras_stride_bytes;
+ unsigned long ras_stride_offset;
/*
* number of consecutive stride request count, and it is similar as
* ras_consecutive_requests, but used for stride I/O mode.
* Note: only more than 2 consecutive stride request are detected,
* stride read-ahead will be enable
*/
- unsigned long ras_consecutive_stride_requests;
+ unsigned long ras_consecutive_stride_requests;
/* index of the last page that async readahead starts */
- unsigned long ras_async_last_readpage;
+ pgoff_t ras_async_last_readpage;
+ /* whether we should increase readahead window */
+ bool ras_need_increase_window;
+ /* whether ra miss check should be skipped */
+ bool ras_no_miss_check;
};
struct ll_readahead_work {
return !!(sbi->ll_flags & LL_SBI_FILE_HEAT);
}
-void ll_ras_enter(struct file *f);
+void ll_ras_enter(struct file *f, unsigned long pos, unsigned long count);
/* llite/lcommon_misc.c */
int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
/* llite/lproc_llite.c */
int ll_debugfs_register_super(struct super_block *sb, const char *name);
void ll_debugfs_unregister_super(struct super_block *sb);
-void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
+void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count);
enum {
LPROC_LL_READ_BYTES,
LPROC_LL_WRITE_BYTES,
+ /* NOTE(review): presumably per-call counters complementing the *_BYTES
+ * counters above — verify against llite/lproc_llite.c. */
+ LPROC_LL_READ,
+ LPROC_LL_WRITE,
LPROC_LL_IOCTL,
LPROC_LL_OPEN,
LPROC_LL_RELEASE,
- LPROC_LL_MAP,
+ LPROC_LL_MMAP,
LPROC_LL_FAULT,
LPROC_LL_MKWRITE,
LPROC_LL_LLSEEK,
LPROC_LL_MKNOD,
LPROC_LL_RENAME,
LPROC_LL_STATFS,
- LPROC_LL_ALLOC_INODE,
LPROC_LL_SETXATTR,
LPROC_LL_GETXATTR,
LPROC_LL_GETXATTR_HITS,
};
/* llite/dir.c */
+/*
+ * Selector for fetching a default layout; GET_DEFAULT_LAYOUT_ROOT appears to
+ * request the filesystem-wide default from the root directory (cf. the
+ * root_request argument of ll_dir_getstripe_default()) — confirm at callers.
+ */
+enum get_default_layout_type {
+ GET_DEFAULT_LAYOUT_ROOT = 1,
+};
+
struct ll_dir_chain {
};
int ll_getattr_dentry(struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);
#ifdef HAVE_IOP_SET_ACL
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-#else /* !CONFIG_FS_POSIX_ACL */
+#else /* !CONFIG_LUSTRE_FS_POSIX_ACL */
#define ll_set_acl NULL
-#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
#endif
int ll_migrate(struct inode *parent, struct file *file,
struct ptlrpc_request **request);
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
int set_default);
-int ll_dir_getstripe(struct inode *inode, void **lmmp,
- int *lmm_size, struct ptlrpc_request **request,
- u64 valid);
+int ll_dir_getstripe_default(struct inode *inode, void **lmmp,
+ int *lmm_size, struct ptlrpc_request **request,
+ struct ptlrpc_request **root_request, u64 valid);
+int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
+ struct ptlrpc_request **request, u64 valid);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
extern struct super_operations lustre_super_operations;
void ll_lli_init(struct ll_inode_info *lli);
-int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
+int ll_fill_super(struct super_block *sb);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
};
struct ll_thread_info {
- struct iov_iter lti_iter;
struct vvp_io_args lti_args;
struct ra_io_arg lti_ria;
struct ll_cl_context lti_io_ctx;
u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
u32 cl_fid_build_gen(const struct lu_fid *fid);
+/* Return the PCC state embedded in @inode's superblock info (ll_sb_info). */
+static inline struct pcc_super *ll_i2pccs(struct inode *inode)
+{
+ return &ll_i2sbi(inode)->ll_pcc_super;
+}
+
+/* As ll_i2pccs(), but starting from the ll_inode_info instead of the inode. */
+static inline struct pcc_super *ll_info2pccs(struct ll_inode_info *lli)
+{
+ return ll_i2pccs(ll_info2i(lli));
+}
+
#endif /* LLITE_INTERNAL_H */