#ifndef LCLIENT_H
#define LCLIENT_H
+#include <lustre/lustre_idl.h>
+#include <cl_object.h>
+
+enum obd_notify_event;
+struct inode;
+struct lov_stripe_md;
+struct lustre_md;
+struct obd_capa;
+struct obd_device;
+struct obd_export;
+struct page;
+
blkcnt_t dirty_cnt(struct inode *inode);
int cl_glimpse_size0(struct inode *inode, int agl);
struct {
enum ccc_setattr_lock_type cui_local_lock;
} setattr;
- } u;
- /**
- * True iff io is processing glimpse right now.
- */
- int cui_glimpse;
+ struct {
+ struct cl_page_list cui_queue;
+ unsigned long cui_written;
+ int cui_from;
+ int cui_to;
+ } write;
+ } u;
+ /**
+ * True iff io is processing glimpse right now.
+ */
+ int cui_glimpse;
/**
* Layout version when this IO is initialized
*/
};
/**
- * True, if \a io is a normal io, False for other (sendfile, splice*).
+ * True, if \a io is a normal io, False for splice_{read,write}.
 * Must be implemented in arch-specific code.
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
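
/*
 * Illustrative sketch, not part of the patch: cl_is_normalio() is a plain
 * predicate, so callers simply branch on it to special-case splice io,
 * which carries no user iovec. The helper below is hypothetical.
 */
static inline size_t example_iov_bytes(const struct lu_env *env,
				       const struct cl_io *io, size_t nob)
{
	/* Only normal io has a meaningful iovec-derived byte count. */
	return cl_is_normalio(env, io) ? nob : 0;
}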
*
* \see ccc_page::cpg_pending_linkage
*/
- cfs_list_t cob_pending_list;
+ struct list_head cob_pending_list;
/**
 * Access to this counter is protected by inode->i_sem. Now that
 * the lifetime of transient pages is covered by the inode semaphore,
 * we don't need to hold any other lock.
*/
int cob_transient_pages;
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- cfs_atomic_t cob_mmap_cnt;
+ /**
+ * Number of outstanding mmaps on this file.
+ *
+ * \see ll_vm_open(), ll_vm_close().
+ */
+ atomic_t cob_mmap_cnt;
/**
 * various flags
 */
/**
 * ccc-private page state.
 */
struct ccc_page {
- struct cl_page_slice cpg_cl;
- int cpg_defer_uptodate;
- int cpg_ra_used;
- int cpg_write_queued;
- /**
- * Non-empty iff this page is already counted in
- * ccc_object::cob_pending_list. Protected by
- * ccc_object::cob_pending_guard. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- cfs_list_t cpg_pending_linkage;
- /** VM page */
- struct page *cpg_page;
+ struct cl_page_slice cpg_cl;
+ unsigned cpg_defer_uptodate:1,
+ cpg_ra_used:1,
+ cpg_write_queued:1;
+ /**
+ * Non-empty iff this page is already counted in
+ * ccc_object::cob_pending_list. Protected by
+ * ccc_object::cob_pending_guard. This list is only used as a flag,
+ * that is, never iterated through, only checked for list_empty(), but
+ * having a list is useful for debugging.
+ */
+ struct list_head cpg_pending_linkage;
+ /** VM page */
+ struct page *cpg_page;
};
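
/*
 * Illustrative sketch, not part of the patch: the "list used as a flag"
 * idiom described at cpg_pending_linkage above. Membership is tested with
 * list_empty() instead of a separate boolean, and the list is never
 * walked. Assumes ccc_object's cob_pending_guard (named in the comment
 * above) is a spinlock; the helper itself is hypothetical.
 */
static inline void example_count_pending(struct ccc_object *cob,
					 struct ccc_page *cp)
{
	spin_lock(&cob->cob_pending_guard);
	if (list_empty(&cp->cpg_pending_linkage))
		/* Not yet counted: link it onto cob_pending_list. */
		list_add(&cp->cpg_pending_linkage, &cob->cob_pending_list);
	spin_unlock(&cob->cob_pending_guard);
}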
static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
return container_of(slice, struct ccc_page, cpg_cl);
}
+static inline pgoff_t ccc_index(struct ccc_page *ccc)
+{
+ return ccc->cpg_cl.cpl_index;
+}
+
struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
struct ccc_device {
const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
void ccc_transient_page_verify(const struct cl_page *page);
int ccc_transient_page_own(const struct lu_env *env,
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice,
		     struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
/**
- * Data structure managing a client's cached clean pages. An LRU of
- * pages is maintained, along with other statistics.
+ * Data structure managing a client's cached pages. A count of
+ * "unstable" pages is maintained, along with an LRU of clean pages.
+ * "Unstable" pages are pages pinned by the ptlrpc layer for
+ * recovery purposes.
*/
struct cl_client_cache {
- cfs_atomic_t ccc_users; /* # of users (OSCs) of this data */
- cfs_list_t ccc_lru; /* LRU list of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- cfs_atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries possible */
- unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+ * # of LRU entries available
+ */
+ atomic_t ccc_lru_left;
+ /**
+ * List of entities (OSCs) for this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * # of threads doing shrinking
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * Set if unstable check is enabled
+ */
+ unsigned int ccc_unstable_check:1;
+ /**
+ * Waitq for awaiting unstable pages to reach zero.
+ * Used at umount time and signaled on BRW commit.
+ */
+ wait_queue_head_t ccc_unstable_waitq;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_t ccc_unstable_nr;
+};
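
/*
 * Illustrative sketch, not part of the patch: how ccc_unstable_nr and
 * ccc_unstable_waitq cooperate per the comments above. Both helpers are
 * hypothetical; wait_event()/wake_up()/atomic_dec_and_test() are stock
 * kernel primitives.
 */
static inline void example_brw_commit(struct cl_client_cache *cache)
{
	/* A BRW commit unpins its pages; wake the unmount waiter when
	 * the last unstable page is gone. */
	if (atomic_dec_and_test(&cache->ccc_unstable_nr))
		wake_up(&cache->ccc_unstable_waitq);
}

static inline void example_umount_wait(struct cl_client_cache *cache)
{
	/* At unmount, block until every unstable page has committed. */
	wait_event(cache->ccc_unstable_waitq,
		   atomic_read(&cache->ccc_unstable_nr) == 0);
}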
+
+enum {
+ LUSTRE_OPC_MKDIR = 0,
+ LUSTRE_OPC_SYMLINK = 1,
+ LUSTRE_OPC_MKNOD = 2,
+ LUSTRE_OPC_CREATE = 3,
+ LUSTRE_OPC_ANY = 5
+};
+
+enum op_cli_flags {
+ CLI_SET_MEA = 1 << 0,
+ CLI_RM_ENTRY = 1 << 1,
+ CLI_HASH64 = 1 << 2,
+ CLI_API32 = 1 << 3,
+ CLI_MIGRATE = 1 << 4,
};
#endif /* LCLIENT_H */