#ifndef LLITE_INTERNAL_H
#define LLITE_INTERNAL_H
+/* Default to about 40MB of readahead on a given system.  That much tied
+ * up in 512KB readahead requests serviced at 40ms each is about 1GB/s. */
+#define SBI_DEFAULT_RA_MAX ((40 << 20) >> PAGE_CACHE_SHIFT)
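As a quick sanity check on the arithmetic in the comment above (a sketch only; the 4KB page size, i.e. PAGE_CACHE_SHIFT == 12, is an assumption, not part of the patch):

    /*
     * Assuming PAGE_CACHE_SHIFT == 12 (4KB pages):
     *
     *   SBI_DEFAULT_RA_MAX = (40 << 20) >> 12 = 10240 pages = 40MB
     *
     * 40MB of 512KB requests keeps 80 RPCs in flight; at 40ms apiece
     * the pipeline retires all 40MB every 40ms:
     *
     *   40MB / 0.040s = 1000MB/s, i.e. roughly 1GB/s
     */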
+
struct ll_sb_info {
+ /* This protects ll_pglist and ll_max_read_ahead_pages; see the usage
+  * sketch after this struct.  It is not safe to take this lock from
+  * interrupt context. */
+ spinlock_t ll_lock;
struct obd_uuid ll_sb_uuid;
-// struct lustre_handle ll_mdc_conn;
struct obd_export *ll_mdc_exp;
struct obd_export *ll_osc_exp;
struct proc_dir_entry* ll_proc_root;
obd_id ll_rootino; /* number of root inode */
- struct obd_uuid ll_mds_uuid;
- struct obd_uuid ll_mds_peer_uuid;
struct lustre_mount_data *ll_lmd;
char *ll_instance;
int ll_flags;
- wait_queue_head_t ll_commitcbd_waitq;
- wait_queue_head_t ll_commitcbd_ctl_waitq;
- int ll_commitcbd_flags;
- struct task_struct *ll_commitcbd_thread;
- time_t ll_commitcbd_waketime;
- time_t ll_commitcbd_timeout;
- spinlock_t ll_commitcbd_lock;
struct list_head ll_conn_chain; /* per-conn chain of SBs */
struct hlist_head ll_orphan_dentry_list; /*please don't ask -p*/
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
- spinlock_t ll_pglist_lock;
unsigned long ll_pglist_gen;
struct list_head ll_pglist;
+
+ unsigned long ll_read_ahead_pages;     /* pages currently held for readahead */
+ unsigned long ll_max_read_ahead_pages; /* cap; defaults to SBI_DEFAULT_RA_MAX */
};
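The ll_lock comment above implies callers run in process context only, so the plain spinlock variants suffice and no irqsave variant is needed. A minimal usage sketch; ll_set_max_readahead is a hypothetical helper, not part of this patch:

    /* Hypothetical helper illustrating the ll_lock rule: process
     * context only, so plain spin_lock()/spin_unlock() are enough. */
    static void ll_set_max_readahead(struct ll_sb_info *sbi,
                                     unsigned long pages)
    {
            spin_lock(&sbi->ll_lock);
            sbi->ll_max_read_ahead_pages = pages;
            spin_unlock(&sbi->ll_lock);
    }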
struct ll_readahead_state {
spinlock_t ras_lock;
- unsigned long ras_last, ras_window, ras_next_index;
+ unsigned long ras_last_readpage;  /* index of the last page read */
+ unsigned long ras_consecutive;    /* consecutive reads seen so far */
+ unsigned long ras_window_start;   /* first page of the readahead window */
+ unsigned long ras_window_len;     /* window length, in pages */
+ unsigned long ras_next_readahead; /* next page index to read ahead */
};
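To make the renamed fields concrete, here is a hedged sketch of how they might track a sequential reader; the logic is inferred from the field names and is not the actual llite readpage path:

    /* Illustrative only: one plausible way the fields cooperate. */
    static void ras_observe_read(struct ll_readahead_state *ras,
                                 unsigned long index)
    {
            spin_lock(&ras->ras_lock);
            if (index == ras->ras_last_readpage + 1) {
                    /* Sequential read: widen the window. */
                    ras->ras_consecutive++;
                    ras->ras_window_len += ras->ras_consecutive;
            } else {
                    /* Seek: restart the window at this index. */
                    ras->ras_consecutive = 0;
                    ras->ras_window_start = index;
                    ras->ras_window_len = 0;
                    ras->ras_next_readahead = index + 1;
            }
            ras->ras_last_readpage = index;
            spin_unlock(&ras->ras_lock);
    }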
extern kmem_cache_t *ll_file_data_slab;
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa);
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc);
void ll_removepage(struct page *page);
-int ll_sync_page(struct page *page);
int ll_readpage(struct file *file, struct page *page);
struct ll_async_page *llap_from_cookie(void *cookie);
struct ll_async_page *llap_from_page(struct page *page);