#define FMODE_EXEC 0
#endif
+#ifndef DCACHE_LUSTRE_INVALID
+#define DCACHE_LUSTRE_INVALID 0x100
+#endif
+
#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
#define LUSTRE_FPRIVATE(file) ((file)->private_data)
/* this lock protects posix_acl, pending_write_llaps, mmap_cnt */
spinlock_t lli_lock;
- struct list_head lli_pending_write_llaps;
struct list_head lli_close_list;
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
struct ll_ra_info {
atomic_t ra_cur_pages;
unsigned long ra_max_pages;
+ unsigned long ra_max_pages_per_file;
unsigned long ra_max_read_ahead_whole_pages;
};
#define LL_SBI_OSS_CAPA 0x100 /* support oss capa */
#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
+#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
/* default value for ll_sb_info->contention_time */
#define SBI_DEFAULT_CONTENTION_SECONDS 60
/*
* The following 3 items are used for detecting the stride I/O
* mode.
- * In stride I/O mode,
+ * In stride I/O mode,
* ...............|-----data-----|****gap*****|--------|******|....
* offset |-stride_pages-|-stride_gap-|
* ras_stride_offset = offset;
struct ll_file_data {
struct ll_readahead_state fd_ras;
int fd_omode;
- struct lustre_handle fd_cwlockh;
- unsigned long fd_gid;
+ struct ccc_grouplock fd_grouplock;
struct ll_file_dir fd_dir;
__u32 fd_flags;
struct file *fd_file;
extern int ll_have_md_lock(struct inode *inode, __u64 bits);
extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
struct lustre_handle *lockh);
+int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *, __u64 bits);
+int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
-int ll_lsm_getattr(struct obd_export *, struct lov_stripe_md *, struct obdo *);
int ll_glimpse_ioctl(struct ll_sb_info *sbi,
struct lov_stripe_md *lsm, lstat_t *st);
+void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
int ll_local_open(struct file *file,
struct lookup_intent *it, struct ll_file_data *fd,
struct obd_client_handle *och);
int ll_md_real_close(struct inode *inode, int flags);
void ll_epoch_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle **och, unsigned long flags);
-int ll_sizeonmds_update(struct inode *inode, struct md_open_data *data,
- struct lustre_handle *fh, __u64 ioepoch);
+int ll_sizeonmds_update(struct inode *inode, struct lustre_handle *fh,
+ __u64 ioepoch);
int ll_inode_getattr(struct inode *inode, struct obdo *obdo);
int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
struct md_open_data **mod);
int ll_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
int num_bytes);
int ll_merge_lvb(struct inode *inode);
+int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg);
+int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
+int ll_fid2path(struct obd_export *exp, void *arg);
/* llite/dcache.c */
/* llite/namei.c */
struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt);
+/** direct write pages */
+struct ll_dio_pages {
+ /** page array to be written. we don't support
+ * partial pages except the last one. */
+ struct page **ldp_pages;
+ /** offset of each page */
+ loff_t *ldp_offsets;
+ /** if ldp_offsets is NULL, the pages are to be
+ * written sequentially, and this is the file
+ * offset of the first page. */
+ loff_t ldp_start_offset;
+ /** how many bytes are to be written. */
+ size_t ldp_size;
+ /** # of pages in the array. */
+ int ldp_nr;
+};
+
+extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
+ int rw, struct inode *inode,
+ struct ll_dio_pages *pv);
+
#endif /* LLITE_INTERNAL_H */