+#ifdef CONFIG_FS_POSIX_ACL
+# include <linux/fs.h>
+#ifdef HAVE_XATTR_ACL
+# include <linux/xattr_acl.h>
+#endif
+#ifdef HAVE_LINUX_POSIX_ACL_XATTR_H
+# include <linux/posix_acl_xattr.h>
+#endif
+#endif
+
+#include <lustre_debug.h>
+#include <lustre_ver.h>
+#include <linux/lustre_version.h>
+#include <lustre_disk.h> /* for s2sbi */
+
+/*
+struct lustre_intent_data {
+ __u64 it_lock_handle[2];
+ __u32 it_disposition;
+ __u32 it_status;
+ __u32 it_lock_mode;
+ }; */
+
+/* If there is no FMODE_EXEC defined, make it to match nothing */
+#ifndef FMODE_EXEC
+#define FMODE_EXEC 0
+#endif
+
+#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
+#define LUSTRE_FPRIVATE(file) ((file)->private_data)
+
+#ifdef LUSTRE_KERNEL_VERSION
+/* Return the lookup intent carried by a nameidata.  On >= 2.5 kernels the
+ * intent is an embedded member (we return its address); on older kernels it
+ * is a pointer member returned directly.  NOTE(review): relies on a
+ * Lustre-patched kernel's nameidata layout — confirm against the patch. */
+static inline struct lookup_intent *ll_nd2it(struct nameidata *nd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ return &nd->intent;
+#else
+ return nd->intent;
+#endif
+}
+#endif
+
+/* Per-dentry private data, reached via dentry->d_fsdata (see ll_d2d() below).
+ * NOTE(review): cwd/mnt field roles are inferred from the names — verify
+ * against the code that maintains these counts. */
+struct ll_dentry_data {
+ int lld_cwd_count; /* presumably a pin count while this is a task's cwd */
+ int lld_mnt_count; /* presumably a pin count while this is a mount point */
+ struct obd_client_handle lld_cwd_och; /* open handle backing the cwd pin */
+ struct obd_client_handle lld_mnt_och; /* open handle backing the mnt pin */
+#ifndef LUSTRE_KERNEL_VERSION
+ struct lookup_intent *lld_it; /* intent stashed here on unpatched kernels */
+#endif
+};
+
+#define ll_d2d(de) ((struct ll_dentry_data*) de->d_fsdata)
+
+extern struct file_operations ll_pgcache_seq_fops;
+
+#define LLI_INODE_MAGIC 0x111d0de5
+#define LLI_INODE_DEAD 0xdeadd00d
+#define LLI_F_HAVE_OST_SIZE_LOCK 0
+#define LLI_F_HAVE_MDS_SIZE_LOCK 1
+
+/* Lustre-private per-inode state ("lli"), paired one-to-one with a VFS inode.
+ * On >= 2.5 kernels the VFS inode is embedded at the tail (lli_vfs_inode)
+ * and recovered with container_of() in ll_i2info(); on older kernels the
+ * lli overlays the inode's u union instead. */
+struct ll_inode_info {
+ int lli_inode_magic; /* LLI_INODE_MAGIC; presumably LLI_INODE_DEAD after teardown — verify */
+ struct semaphore lli_size_sem; /* serializes i_size/KMS updates — see locking comment below */
+ void *lli_size_sem_owner; /* holder of lli_size_sem; presumably for debug/recursion checks — TODO confirm */
+ struct semaphore lli_open_sem;
+ struct semaphore lli_write_sem;
+ struct lov_stripe_md *lli_smd; /* LOV striping descriptor for this file */
+ char *lli_symlink_name; /* presumably the cached symlink target — verify */
+ __u64 lli_maxbytes; /* maximum byte offset supported for this file */
+ __u64 lli_io_epoch;
+ unsigned long lli_flags; /* LLI_F_* bit numbers (defined above) */
+
+ /* this lock protects lli_send_done_writing, lli_pending_write_llaps
+ * and lli_mmap_cnt */
+ spinlock_t lli_lock;
+ struct list_head lli_pending_write_llaps;
+ int lli_send_done_writing;
+ atomic_t lli_mmap_cnt; /* count of live mmaps of this inode */
+
+ struct list_head lli_close_item;
+
+ /* for writepage() only to communicate to fsync */
+ int lli_async_rc;
+
+ struct posix_acl *lli_posix_acl; /* POSIX ACL for this inode (CONFIG_FS_POSIX_ACL builds) */
+
+ struct list_head lli_dead_list;
+
+ struct semaphore lli_och_sem; /* Protects access to och pointers
+ and their usage counters */
+ /* We need all three because every inode may be opened in different
+ modes */
+ struct obd_client_handle *lli_mds_read_och;
+ __u64 lli_open_fd_read_count;
+ struct obd_client_handle *lli_mds_write_och;
+ __u64 lli_open_fd_write_count;
+ struct obd_client_handle *lli_mds_exec_och;
+ __u64 lli_open_fd_exec_count;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ /* VFS inode embedded at the tail; ll_i2info() recovers the lli from it */
+ struct inode lli_vfs_inode;
+#endif
+};
+
+/*
+ * Locking to guarantee consistency of non-atomic updates to long long i_size,
+ * consistency between file size and KMS, and consistency within
+ * ->lli_smd->lsm_oinfo[]'s.
+ *
+ * Implemented by ->lli_size_sem and ->lsm_sem, nested in that order.
+ */
+
+void ll_inode_size_lock(struct inode *inode, int lock_lsm);
+void ll_inode_size_unlock(struct inode *inode, int unlock_lsm);
+
+// FIXME: replace the name of this with LL_I to conform to kernel stuff
+// static inline struct ll_inode_info *LL_I(struct inode *inode)
+/* Map a VFS inode to its Lustre-private ll_inode_info.
+ * >= 2.5: the inode is embedded in the lli, so step back to the container.
+ * < 2.5: the lli occupies the inode's u union — note this takes the ADDRESS
+ * of u.generic_ip (the union storage itself), it does not dereference the
+ * pointer stored there.  NOTE(review): this requires the lli to fit in (or
+ * be allocated over) inode->u — confirm the inode allocation path. */
+static inline struct ll_inode_info *ll_i2info(struct inode *inode)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ return container_of(inode, struct ll_inode_info, lli_vfs_inode);
+#else
+ return (struct ll_inode_info *)&(inode->u.generic_ip);
+#endif
+}
+
+/* default to about 40meg of readahead on a given system. That much tied
+ * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - CFS_PAGE_SHIFT))
+
+/* default to read-ahead full files smaller than 2MB on the second read */
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - CFS_PAGE_SHIFT))
+
+/* Readahead statistics counters; each value indexes ll_ra_info.ra_stats[].
+ * NOTE(review): _NR_RA_STAT — an identifier starting with underscore +
+ * uppercase is reserved for the implementation by the C standard; consider
+ * renaming to NR_RA_STAT. */
+enum ra_stat {
+ RA_STAT_HIT = 0,
+ RA_STAT_MISS,
+ RA_STAT_DISTANT_READPAGE,
+ RA_STAT_MISS_IN_WINDOW,
+ RA_STAT_FAILED_GRAB_PAGE,
+ RA_STAT_FAILED_MATCH,
+ RA_STAT_DISCARDED,
+ RA_STAT_ZERO_LEN,
+ RA_STAT_ZERO_WINDOW,
+ RA_STAT_EOF,
+ RA_STAT_MAX_IN_FLIGHT,
+ RA_STAT_WRONG_GRAB_PAGE,
+ _NR_RA_STAT, /* sentinel: number of counters, sizes ra_stats[] */
+};
+
+/* Readahead accounting and limits. */
+struct ll_ra_info {
+ unsigned long ra_cur_pages; /* pages currently held by readahead */
+ unsigned long ra_max_pages; /* readahead cap (default SBI_DEFAULT_READAHEAD_MAX) */
+ unsigned long ra_max_read_ahead_whole_pages; /* read whole file when smaller
+ * than this (default SBI_DEFAULT_READAHEAD_WHOLE_MAX) */
+ unsigned long ra_stats[_NR_RA_STAT]; /* counters indexed by enum ra_stat */
+};
+
+/* LL_HIST_MAX=32 causes an overflow */
+#define LL_HIST_MAX 28
+#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
+#define LL_PROCESS_HIST_MAX 10
+/* Per-process read/write histograms; one slot in ll_rw_extents_info. */
+struct per_process_info {
+ pid_t pid; /* process this slot tracks */
+ struct obd_histogram pp_r_hist; /* read histogram */
+ struct obd_histogram pp_w_hist; /* write histogram */
+};
+
+/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
+struct ll_rw_extents_info {
+ /* slots 0..LL_PROCESS_HIST_MAX-1 are per-process; the one extra final
+ * slot holds the aggregate across all tracked processes */
+ struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
+};
+
+#define LL_OFFSET_HIST_MAX 100
+/* State for tracking one process's read/write stream for offset/extent
+ * statistics.  NOTE(review): individual field semantics below are inferred
+ * from the names — confirm against the stats-recording code. */
+struct ll_rw_process_info {
+ pid_t rw_pid; /* process being tracked */
+ int rw_op; /* read vs. write (encoding defined by caller) */
+ loff_t rw_range_start; /* start of the current contiguous range */
+ loff_t rw_range_end; /* end of the current contiguous range */
+ loff_t rw_last_file_pos; /* file position after the previous I/O */
+ loff_t rw_offset; /* offset of the current I/O */
+ size_t rw_smallest_extent; /* smallest extent seen so far */
+ size_t rw_largest_extent; /* largest extent seen so far */
+ struct file *rw_last_file; /* file touched by the previous I/O */
+};
+
+/* VFS entry points instrumented for per-operation statistics.
+ * VFS_OPS_LAST is the sentinel count used to size stat arrays. */
+enum vfs_ops_list {
+ VFS_OPS_READ = 0,
+ VFS_OPS_WRITE,
+ VFS_OPS_IOCTL,
+ VFS_OPS_OPEN,
+ VFS_OPS_RELEASE,
+ VFS_OPS_MMAP,
+ VFS_OPS_SEEK,
+ VFS_OPS_FSYNC,
+ VFS_OPS_FLOCK,
+ VFS_OPS_SETATTR,
+ VFS_OPS_GETATTR,
+ VFS_OPS_SETXATTR,
+ VFS_OPS_GETXATTR,
+ VFS_OPS_LISTXATTR,
+ VFS_OPS_REMOVEXATTR,
+ VFS_OPS_TRUNCATE,
+ VFS_OPS_INODE_PERMISSION,
+ VFS_OPS_LAST, /* sentinel: number of tracked operations */
+};
+
+/* Selects which processes the VFS stats tracking applies to.
+ * VFS_TRACK_LAST is the sentinel count of track modes. */
+enum vfs_track_type {
+ VFS_TRACK_ALL = 0, /* track all processes */
+ VFS_TRACK_PID, /* track process with this pid */
+ VFS_TRACK_PPID, /* track processes with this ppid */
+ VFS_TRACK_GID, /* track processes with this gid */
+ VFS_TRACK_LAST, /* sentinel: number of track modes */
+};
+
+/* flags for sbi->ll_flags */
+#define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */
+#define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */
+#define LL_SBI_FLOCK 0x04
+#define LL_SBI_USER_XATTR 0x08 /* support user xattr */
+#define LL_SBI_ACL 0x10 /* support ACL */
+#define LL_SBI_JOIN 0x20 /* support JOIN */
+