union {
/* for directory */
struct {
- /* serialize normal readdir and statahead-readdir. */
- struct mutex lli_readdir_mutex;
-
/* metadata statahead */
/* since parent-child threads can share the same @file
* struct, "opendir_key" is the token used at dir close, for the
* case where the parent exits before the child, to decide who
* should clean up the directory statahead. */
struct mutex lli_pcc_lock;
enum lu_pcc_state_flags lli_pcc_state;
struct pcc_inode *lli_pcc_inode;
+ struct mutex lli_group_mutex;
+ __u64 lli_group_users;
+ unsigned long lli_group_gid;
};
};
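The new lli_group_* fields track group-lock sharing on an inode: lli_group_mutex serializes changes, lli_group_users counts current holders, and lli_group_gid records the group id they agreed on. A minimal standalone sketch of that reference-counting pattern, using a pthread mutex in place of the kernel mutex and hypothetical helper names:

/* Standalone model of the lli_group_* fields; the pthread mutex and
 * grouplock_get/grouplock_put names are illustrative, not kernel code. */
#include <pthread.h>
#include <stdint.h>

struct group_state {
	pthread_mutex_t mutex;	/* models lli_group_mutex */
	uint64_t users;		/* models lli_group_users */
	unsigned long gid;	/* models lli_group_gid */
};

/* First holder records the group id; later holders must present the
 * same id or fail, mirroring shared group-lock semantics. */
static int grouplock_get(struct group_state *s, unsigned long gid)
{
	int rc = 0;

	pthread_mutex_lock(&s->mutex);
	if (s->users == 0)
		s->gid = gid;
	else if (s->gid != gid)
		rc = -1;	/* conflicting group id */
	if (rc == 0)
		s->users++;
	pthread_mutex_unlock(&s->mutex);
	return rc;
}

static void grouplock_put(struct group_state *s)
{
	pthread_mutex_lock(&s->mutex);
	if (--s->users == 0)
		s->gid = 0;	/* last holder drops the group id */
	pthread_mutex_unlock(&s->mutex);
}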
* counted by page index.
*/
struct ra_io_arg {
- unsigned long ria_start; /* start offset of read-ahead*/
- unsigned long ria_end; /* end offset of read-ahead*/
+ pgoff_t ria_start; /* start offset of read-ahead*/
+ pgoff_t ria_end; /* end offset of read-ahead*/
unsigned long ria_reserved; /* reserved pages for read-ahead */
- unsigned long ria_end_min; /* minimum end to cover current read */
- bool ria_eof; /* reach end of file */
+ pgoff_t ria_end_min; /* minimum end to cover current read */
+ bool ria_eof; /* reach end of file */
/* If stride read pattern is detected, ria_stoff is where the
* stride read started. Note: for normal read-ahead, this value
* is meaningless and will not be accessed */
- pgoff_t ria_stoff;
- /* ria_length and ria_pages are the length and pages length in the
+ unsigned long ria_stoff;
+ /* ria_length and ria_bytes are the stride length and the data byte
* count in stride I/O mode. They are also used to check whether
* the read-ahead pages belong to a stride I/O read */
unsigned long ria_length;
- unsigned long ria_pages;
+ unsigned long ria_bytes;
};
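With ria_start/ria_end switched to pgoff_t, byte positions and page indexes stay distinct types. A standalone sketch of the byte-range-to-page-window conversion this implies; the typedef, PAGE_SHIFT value, and helper names are illustrative:

/* Standalone sketch: derive a page-index window (models ria_start and
 * ria_end, inclusive) from a byte range; names here are illustrative. */
typedef unsigned long my_pgoff_t;
#define MY_PAGE_SHIFT 12	/* 4 KiB pages assumed for the example */

struct page_window {
	my_pgoff_t start;
	my_pgoff_t end;
};

/* Assumes bytes > 0 so the inclusive end index is well defined. */
static struct page_window window_from_bytes(unsigned long long pos,
					    unsigned long long bytes)
{
	struct page_window w;

	w.start = (my_pgoff_t)(pos >> MY_PAGE_SHIFT);
	w.end = (my_pgoff_t)((pos + bytes - 1) >> MY_PAGE_SHIFT);
	return w;
}

/* Pages covered by the window, e.g. for a reservation count. */
static unsigned long window_pages(const struct page_window *w)
{
	return w->end - w->start + 1;
}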
/* LL_HIST_MAX=32 causes an overflow */
struct lu_fid ll_root_fid; /* root object fid */
int ll_flags;
- unsigned int ll_umounting:1,
- ll_xattr_cache_enabled:1,
+ unsigned int ll_xattr_cache_enabled:1,
ll_xattr_cache_set:1, /* already set to 0/1 */
ll_client_common_fill_super_succeeded:1,
ll_checksum_set:1;
/* st_blksize returned by stat(2), when non-zero */
unsigned int ll_stat_blksize;
+ /* maximum relative age of cached statfs results */
+ unsigned int ll_statfs_max_age;
+
struct kset ll_kset; /* sysfs object */
struct completion ll_kobj_unregister;
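The new ll_statfs_max_age knob bounds how stale a cached statfs result may be before the client refreshes it from the servers. A standalone model of that age check; everything except the ll_statfs_max_age idea is illustrative:

/* Standalone model of age-gated statfs caching; the struct and helper
 * names are illustrative stand-ins. */
#include <time.h>

struct statfs_cache {
	time_t stamp;		/* when the cached result was filled */
	unsigned int max_age;	/* models ll_statfs_max_age, in seconds */
	unsigned long blocks;	/* cached aggregate f_blocks */
	unsigned long bfree;	/* cached aggregate f_bfree */
};

/* Return 1 and hand back the cached numbers while they are fresh;
 * return 0 so the caller performs a real statfs round trip. */
static int statfs_from_cache(const struct statfs_cache *c,
			     unsigned long *blocks, unsigned long *bfree)
{
	if (c->max_age == 0 || time(NULL) - c->stamp > c->max_age)
		return 0;
	*blocks = c->blocks;
	*bfree = c->bfree;
	return 1;
}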
*/
struct ll_readahead_state {
spinlock_t ras_lock;
+ /* End byte that read(2) tries to read. */
+ unsigned long ras_last_read_end;
/*
- * index of the last page that read(2) needed and that wasn't in the
- * cache. Used by ras_update() to detect seeks.
- *
- * XXX nikita: if access seeks into cached region, Lustre doesn't see
- * this.
- */
- unsigned long ras_last_readpage;
- /*
- * number of pages read after last read-ahead window reset. As window
+ * number of bytes read after the last read-ahead window reset. As the
* window is reset on each seek, this is effectively the number of
* consecutive accesses. Maybe ->ras_accessed_in_window is a better name.
*
* case, it probably doesn't make sense to expand window to
* PTLRPC_MAX_BRW_PAGES on the third access.
*/
- unsigned long ras_consecutive_pages;
+ unsigned long ras_consecutive_bytes;
/*
* number of read requests after the last read-ahead window reset.
* As the window is reset on each seek, this is effectively the number
* of consecutive read requests and is used to trigger read-ahead.
*/
- unsigned long ras_consecutive_requests;
+ unsigned long ras_consecutive_requests;
/*
* Parameters of current read-ahead window. Handled by
* ras_update(). On the initial access to the file or after a seek,
* window is reset to 0. After 3 consecutive accesses, window is
* expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
* PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
*/
- unsigned long ras_window_start, ras_window_len;
+ pgoff_t ras_window_start, ras_window_len;
/*
* Optimal RPC size. It decides how many pages will be sent
* for each read-ahead.
*/
- unsigned long ras_rpc_size;
+ unsigned long ras_rpc_size;
/*
* Where next read-ahead should start at. This lies within read-ahead
* window. Read-ahead window is read in pieces rather than at once
* because: 1. lustre limits the total number of pages under read-ahead
* by ->ra_max_pages (see ll_ra_count_get()); 2. the client cannot read
* pages not covered by a DLM lock.
*/
- unsigned long ras_next_readahead;
+ pgoff_t ras_next_readahead;
/*
* Total number of ll_file_read requests issued; reads originating
* from mmap are not counted in this total. This value is used to
* trigger full file read-ahead after multiple reads to a small file.
*/
- unsigned long ras_requests;
+ unsigned long ras_requests;
/*
* Page index with respect to the current request; this value
* will not be accurate for reads issued via mmap.
*/
- unsigned long ras_request_index;
+ unsigned long ras_request_index;
/*
* The following 3 items are used for detecting the stride I/O
* mode.
* In stride I/O mode,
* ...............|-----data-----|****gap*****|--------|******|....
- * offset |-stride_pages-|-stride_gap-|
+ * offset |-stride_bytes-|-stride_gap-|
* ras_stride_offset = offset;
- * ras_stride_length = stride_pages + stride_gap;
- * ras_stride_pages = stride_pages;
- * Note: all these three items are counted by pages.
- */
- unsigned long ras_stride_length;
- unsigned long ras_stride_pages;
- pgoff_t ras_stride_offset;
+ * ras_stride_length = stride_bytes + stride_gap;
+ * ras_stride_bytes = stride_bytes;
+ * Note: all three items are counted in bytes.
+ */
+ unsigned long ras_stride_length;
+ unsigned long ras_stride_bytes;
+ unsigned long ras_stride_offset;
/*
* number of consecutive stride requests; it is similar to
* ras_consecutive_requests, but used for stride I/O mode.
* Note: stride read-ahead is enabled only after more than 2
* consecutive stride requests are detected
*/
- unsigned long ras_consecutive_stride_requests;
+ unsigned long ras_consecutive_stride_requests;
/* index of the page at which the last async readahead started */
- unsigned long ras_async_last_readpage;
+ pgoff_t ras_async_last_readpage;
};
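Since the state above is now byte-granular, sequential and stride detection both reduce to byte arithmetic: a read is sequential when it starts right after ras_last_read_end, and an offset belongs to a stride's data segment when its position within the stride period falls below ras_stride_bytes. A standalone model of both checks; the struct and helper names are illustrative:

/* Standalone model of byte-granular read-ahead state; names are
 * illustrative stand-ins for the fields above. */
#include <stdbool.h>

struct ras_model {
	unsigned long last_read_end;	 /* models ras_last_read_end */
	unsigned long consecutive_bytes; /* models ras_consecutive_bytes */
	unsigned long stride_offset;	 /* models ras_stride_offset */
	unsigned long stride_length;	 /* models ras_stride_length */
	unsigned long stride_bytes;	 /* models ras_stride_bytes */
};

/* Byte-granular sequential detection: sub-page sequential reads that a
 * page-index comparison would see as the same index still count. */
static void ras_model_update(struct ras_model *r, unsigned long pos,
			     unsigned long count)
{
	if (pos == r->last_read_end + 1)
		r->consecutive_bytes += count;
	else
		r->consecutive_bytes = count;	/* seek: window resets */
	r->last_read_end = pos + count - 1;
}

/* Stride membership per the diagram above: inside the data segment iff
 * the position within the current period is below stride_bytes. */
static bool ras_in_stride_data(const struct ras_model *r, unsigned long pos)
{
	if (pos < r->stride_offset || r->stride_length == 0)
		return false;
	return (pos - r->stride_offset) % r->stride_length < r->stride_bytes;
}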
struct ll_readahead_work {
/* llite/lproc_llite.c */
int ll_debugfs_register_super(struct super_block *sb, const char *name);
void ll_debugfs_unregister_super(struct super_block *sb);
-void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
+void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count);
enum {
- LPROC_LL_DIRTY_HITS,
- LPROC_LL_DIRTY_MISSES,
LPROC_LL_READ_BYTES,
LPROC_LL_WRITE_BYTES,
- LPROC_LL_BRW_READ,
- LPROC_LL_BRW_WRITE,
+ LPROC_LL_READ,
+ LPROC_LL_WRITE,
LPROC_LL_IOCTL,
LPROC_LL_OPEN,
LPROC_LL_RELEASE,
- LPROC_LL_MAP,
+ LPROC_LL_MMAP,
LPROC_LL_FAULT,
LPROC_LL_MKWRITE,
LPROC_LL_LLSEEK,
LPROC_LL_RMDIR,
LPROC_LL_MKNOD,
LPROC_LL_RENAME,
- LPROC_LL_STAFS,
- LPROC_LL_ALLOC_INODE,
+ LPROC_LL_STATFS,
LPROC_LL_SETXATTR,
LPROC_LL_GETXATTR,
LPROC_LL_GETXATTR_HITS,
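Widening the tally count from int to long matters for the byte counters above: a single read of more than 2 GiB would overflow an int. A standalone illustration, assuming LP64 where long is 64-bit as in the kernel; the stats_tally helper is an illustrative stand-in:

/* Standalone illustration of the int -> long widening; assumes LP64
 * so long is 64-bit, as in the kernel's use of ll_stats_ops_tally(). */
#include <stdio.h>

static void stats_tally(long *counter, long count)
{
	*counter += count;	/* stands in for ll_stats_ops_tally() */
}

int main(void)
{
	long read_bytes = 0;			 /* LPROC_LL_READ_BYTES */
	long one_read = 3L * 1024 * 1024 * 1024; /* 3 GiB > INT_MAX */

	stats_tally(&read_bytes, one_read);
	printf("tallied %ld bytes\n", read_bytes);
	return 0;
}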
extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
struct ll_file_data *file, loff_t pos,
size_t count, int rw);
-void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot);
#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
int ll_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
int ll_getattr_dentry(struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);
#ifdef HAVE_IOP_SET_ACL
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-#else /* !CONFIG_FS_POSIX_ACL */
+#else /* !CONFIG_LUSTRE_FS_POSIX_ACL */
#define ll_set_acl NULL
-#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
#endif
int ll_migrate(struct inode *parent, struct file *file,
struct lmv_user_md *lum, const char *name);
int ll_get_fid_by_name(struct inode *parent, const char *name,
int namelen, struct lu_fid *fid, struct inode **inode);
-#ifdef HAVE_GENERIC_PERMISSION_4ARGS
-int ll_inode_permission(struct inode *inode, int mask, unsigned int flags);
-#else
-# ifndef HAVE_INODE_PERMISION_2ARGS
-int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd);
-# else
int ll_inode_permission(struct inode *inode, int mask);
-# endif
-#endif
int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa);
int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
unsigned long arg);
};
struct ll_thread_info {
- struct iov_iter lti_iter;
struct vvp_io_args lti_args;
struct ra_io_arg lti_ria;
struct ll_cl_context lti_io_ctx;
return via;
}
+void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
+ struct vvp_io_args *args);
+
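Moving the ll_io_init() declaration here with a vvp_io_args parameter pairs with dropping lti_iter from ll_thread_info above: the iterator state travels explicitly with the call instead of being stashed in per-thread storage. A standalone sketch of that explicit-argument style; all types and names are illustrative:

/* Standalone sketch of passing I/O args explicitly instead of caching
 * them in per-thread state; all types and names are illustrative. */
#include <stddef.h>

struct sketch_io_args { int write; size_t count; };
struct sketch_io { int type; size_t bytes; };

enum { SKETCH_CIT_READ, SKETCH_CIT_WRITE };

static void sketch_io_init(struct sketch_io *io, int iot,
			   const struct sketch_io_args *args)
{
	io->type = iot;
	/* args may be NULL on paths with no user buffer (e.g. fsync) */
	io->bytes = args ? args->count : 0;
}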
/* llite/llite_mmap.c */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count);
-static inline void ll_invalidate_page(struct page *vmpage)
-{
- struct address_space *mapping = vmpage->mapping;
- loff_t offset = vmpage->index << PAGE_SHIFT;
-
- LASSERT(PageLocked(vmpage));
- if (mapping == NULL)
- return;
-
- /*
- * truncate_complete_page() calls
- * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
- */
- ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
- truncate_complete_page(mapping, vmpage);
-}
-
#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
/* don't need an addref as the sb_info should be holding one */
*bits = it->it_lock_bits;
}
-static inline void ll_lock_dcache(struct inode *inode)
-{
-#ifdef HAVE_DCACHE_LOCK
- spin_lock(&dcache_lock);
-#else
- spin_lock(&inode->i_lock);
-#endif
-}
-
-static inline void ll_unlock_dcache(struct inode *inode)
-{
-#ifdef HAVE_DCACHE_LOCK
- spin_unlock(&dcache_lock);
-#else
- spin_unlock(&inode->i_lock);
-#endif
-}
-
static inline int d_lustre_invalid(const struct dentry *dentry)
{
struct ll_dentry_data *lld = ll_d2d(dentry);