#define LL_MAX_BLKSIZE_BITS 22
#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
-#define LUSTRE_FPRIVATE(file) ((file)->private_data)
+
+#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
struct ll_dentry_data {
struct lookup_intent *lld_it;
static inline bool obd_connect_has_secctx(struct obd_connect_data *data)
{
-#if defined(HAVE_SECURITY_DENTRY_INIT_SECURITY) && defined(CONFIG_SECURITY)
+#ifdef CONFIG_SECURITY
return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX;
#else
return false;
-#endif /* HAVE_SECURITY_DENTRY_INIT_SECURITY */
+#endif
}
static inline void obd_connect_set_secctx(struct obd_connect_data *data)
{
-#if defined(HAVE_SECURITY_DENTRY_INIT_SECURITY) && defined(CONFIG_SECURITY)
+#ifdef CONFIG_SECURITY
data->ocd_connect_flags2 |= OBD_CONNECT2_FILE_SECCTX;
-#endif /* HAVE_SECURITY_DENTRY_INIT_SECURITY */
+#endif
}
int ll_dentry_init_security(struct dentry *dentry, int mode, struct qstr *name,
/* default to use at least 16M for fast read if possible */
#define RA_REMAIN_WINDOW_MIN MiB_TO_PAGES(16UL)
-/* default to about 64M of readahead on a given system. */
-#define SBI_DEFAULT_READAHEAD_MAX MiB_TO_PAGES(64UL)
+/* default readahead on a given system. */
+#define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(64UL)
-/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX MiB_TO_PAGES(2UL)
+/* default to read ahead whole files smaller than the limit on the second read */
+#define SBI_DEFAULT_READ_AHEAD_WHOLE_MAX MiB_TO_PAGES(2UL)
enum ra_stat {
RA_STAT_HIT = 0,
* counted by page index.
*/
struct ra_io_arg {
- pgoff_t ria_start; /* start offset of read-ahead*/
- pgoff_t ria_end; /* end offset of read-ahead*/
- unsigned long ria_reserved; /* reserved pages for read-ahead */
- pgoff_t ria_end_min; /* minimum end to cover current read */
- bool ria_eof; /* reach end of file */
- /* If stride read pattern is detected, ria_stoff means where
- * stride read is started. Note: for normal read-ahead, the
+	pgoff_t ria_start_idx;	/* start page index of read-ahead */
+	pgoff_t ria_end_idx;	/* end page index of read-ahead */
+ unsigned long ria_reserved; /* reserved pages for read-ahead */
+ pgoff_t ria_end_idx_min;/* minimum end to cover current read */
+ bool ria_eof; /* reach end of file */
+	/* If stride read pattern is detected, ria_stoff is the byte offset
+	 * where the stride read starts. Note: for normal read-ahead, the
* value here is meaningless, and also it will not be accessed*/
- unsigned long ria_stoff;
+ loff_t ria_stoff;
/* ria_length and ria_bytes are the length and pages length in the
* stride I/O mode. And they will also be used to check whether
* it is stride I/O read-ahead in the read-ahead pages*/
- unsigned long ria_length;
- unsigned long ria_bytes;
+ loff_t ria_length;
+ loff_t ria_bytes;
};
/* LL_HIST_MAX=32 causes an overflow */
* per file-descriptor read-ahead data.
*/
struct ll_readahead_state {
- spinlock_t ras_lock;
+ spinlock_t ras_lock;
/* End byte that read(2) try to read. */
- unsigned long ras_last_read_end;
+ loff_t ras_last_read_end_bytes;
/*
* number of bytes read after last read-ahead window reset. As window
* is reset on each seek, this is effectively a number of consecutive
* case, it probably doesn't make sense to expand window to
* PTLRPC_MAX_BRW_PAGES on the third access.
*/
- unsigned long ras_consecutive_bytes;
+ loff_t ras_consecutive_bytes;
/*
* number of read requests after the last read-ahead window reset
* As window is reset on each seek, this is effectively the number
* on consecutive read request and is used to trigger read-ahead.
*/
- unsigned long ras_consecutive_requests;
+ unsigned long ras_consecutive_requests;
/*
* Parameters of current read-ahead window. Handled by
* ras_update(). On the initial access to the file or after a seek,
* expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
* PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
*/
- pgoff_t ras_window_start, ras_window_len;
+ pgoff_t ras_window_start_idx;
+ pgoff_t ras_window_pages;
/*
- * Optimal RPC size. It decides how many pages will be sent
- * for each read-ahead.
+ * Optimal RPC size in pages.
+ * It decides how many pages will be sent for each read-ahead.
*/
- unsigned long ras_rpc_size;
+ unsigned long ras_rpc_pages;
/*
* Where next read-ahead should start at. This lies within read-ahead
* window. Read-ahead window is read in pieces rather than at once
* ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
* not covered by DLM lock.
*/
- pgoff_t ras_next_readahead;
+ pgoff_t ras_next_readahead_idx;
/*
* Total number of ll_file_read requests issued, reads originating
* due to mmap are not counted in this total. This value is used to
* trigger full file read-ahead after multiple reads to a small file.
*/
- unsigned long ras_requests;
+ unsigned long ras_requests;
/*
* The following 3 items are used for detecting the stride I/O
* mode.
* ras_stride_bytes = stride_bytes;
* Note: all these three items are counted by bytes.
*/
- unsigned long ras_stride_length;
- unsigned long ras_stride_bytes;
- unsigned long ras_stride_offset;
+ loff_t ras_stride_offset;
+ loff_t ras_stride_length;
+ loff_t ras_stride_bytes;
/*
* number of consecutive stride request count, and it is similar as
* ras_consecutive_requests, but used for stride I/O mode.
* Note: only more than 2 consecutive stride request are detected,
* stride read-ahead will be enable
*/
- unsigned long ras_consecutive_stride_requests;
+ unsigned long ras_consecutive_stride_requests;
/* index of the last page that async readahead starts */
- pgoff_t ras_async_last_readpage;
+ pgoff_t ras_async_last_readpage_idx;
/* whether we should increase readahead window */
- bool ras_need_increase_window;
+ bool ras_need_increase_window;
/* whether ra miss check should be skipped */
- bool ras_no_miss_check;
+ bool ras_no_miss_check;
};
struct ll_readahead_work {
/** File to readahead */
struct file *lrw_file;
- /** Start bytes */
- unsigned long lrw_start;
- /** End bytes */
- unsigned long lrw_end;
+ pgoff_t lrw_start_idx;
+ pgoff_t lrw_end_idx;
/* async worker to handler read */
struct work_struct lrw_readahead_work;
return !!(sbi->ll_flags & LL_SBI_FILE_HEAT);
}
-void ll_ras_enter(struct file *f, unsigned long pos, unsigned long count);
+void ll_ras_enter(struct file *f, loff_t pos, size_t count);
/* llite/lcommon_misc.c */
int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
int ll_flush_ctx(struct inode *inode);
void ll_umount_begin(struct super_block *sb);
int ll_remount_fs(struct super_block *sb, int *flags, char *data);
-#ifdef HAVE_SUPEROPS_USE_DENTRY
int ll_show_options(struct seq_file *seq, struct dentry *dentry);
-#else
-int ll_show_options(struct seq_file *seq, struct vfsmount *vfs);
-#endif
void ll_dirty_page_discard_warn(struct page *page, int ioret);
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
struct super_block *, struct lookup_intent *);
sai_agl_valid:1,/* AGL is valid for the dir */
sai_in_readpage:1;/* statahead is in readdir()*/
wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
- struct ptlrpc_thread sai_thread; /* stat-ahead thread */
- struct ptlrpc_thread sai_agl_thread; /* AGL thread */
+ struct task_struct *sai_task; /* stat-ahead thread */
+ struct task_struct *sai_agl_task; /* AGL thread */
struct list_head sai_interim_entries; /* entries which got async
* stat reply, but not
* instantiated */
static inline int ll_file_nolock(const struct file *file)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct inode *inode = file_inode((struct file *)file);
- LASSERT(fd != NULL);
- return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
- (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
+ LASSERT(fd != NULL);
+ return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
+ (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
}
static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,