#ifndef LCLIENT_H
#define LCLIENT_H
+#include <lustre/lustre_idl.h>
+#include <cl_object.h>
+
+enum obd_notify_event;
+struct inode;
+struct lov_stripe_md;
+struct lustre_md;
+struct obd_capa;
+struct obd_device;
+struct obd_export;
+struct page;
+
blkcnt_t dirty_cnt(struct inode *inode);
int cl_glimpse_size0(struct inode *inode, int agl);
struct {
enum ccc_setattr_lock_type cui_local_lock;
} setattr;
- } u;
- /**
- * True iff io is processing glimpse right now.
- */
- int cui_glimpse;
+ struct {
+ struct cl_page_list cui_queue;
+ unsigned long cui_written;
+ int cui_from;
+ int cui_to;
+ } write;
+ } u;
+ /**
+ * True iff io is processing glimpse right now.
+ */
+ int cui_glimpse;
/**
* Layout version when this IO is initialized
*/
* File descriptor against which IO is done.
*/
struct ll_file_data *cui_fd;
-#ifndef HAVE_FILE_WRITEV
struct kiocb *cui_iocb;
-#endif
};
/**
- * True, if \a io is a normal io, False for other (sendfile, splice*).
+ * True, if \a io is a normal io, False for splice_{read,write}.
 * must be implemented in arch-specific code.
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
 * we don't need to hold any lock.
*/
int cob_transient_pages;
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- cfs_atomic_t cob_mmap_cnt;
+ /**
+ * Number of outstanding mmaps on this file.
+ *
+ * \see ll_vm_open(), ll_vm_close().
+ */
+ atomic_t cob_mmap_cnt;
/**
* various flags
*/
cfs_list_t cpg_pending_linkage;
/** VM page */
- cfs_page_t *cpg_page;
+ struct page *cpg_page;
};
static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
return container_of(slice, struct ccc_page, cpg_cl);
}
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage);
+static inline pgoff_t ccc_index(struct ccc_page *ccc)
+{
+ return ccc->cpg_cl.cpl_index;
+}
+
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
struct ccc_device {
struct cl_device cdv_cl;
const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
-cfs_page_t *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
void ccc_transient_page_verify(const struct cl_page *page);
int ccc_transient_page_own(const struct lu_env *env,
struct ccc_io *cl2ccc_io (const struct lu_env *env,
const struct cl_io_slice *slice);
struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-cfs_page_t *cl2vm_page (const struct cl_page_slice *slice);
+struct page *cl2vm_page (const struct cl_page_slice *slice);
struct inode *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc (struct inode *inode);
int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
struct obd_capa *capa);
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage);
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);
-#ifdef INVARIANT_CHECK
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define CLOBINVRNT(env, clob, expr) \
do { \
if (unlikely(!(expr))) { \
LINVRNT(0); \
} \
} while (0)
-#else /* !INVARIANT_CHECK */
+#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
-#endif /* !INVARIANT_CHECK */
+ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
+#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
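
/*
 * Illustrative usage only (the actual call sites are not shown in this
 * excerpt); the check uses ccc_object_invariant() declared below:
 *
 * \code
 * CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 * \endcode
 *
 * With CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK defined the expression is
 * evaluated and LINVRNT(0) is hit if it is false; otherwise the arguments
 * are only passed through sizeof, so no code is generated while unused
 * variable warnings are still avoided.
 */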
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
* layer for recovery purposes.
*/
struct cl_client_cache {
- cfs_atomic_t ccc_users; /* # of users (OSCs) of this data */
- cfs_list_t ccc_lru; /* LRU list of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- cfs_atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries possible */
- unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
- cfs_atomic_t ccc_unstable_nr; /* # of unstable pages pinned */
- cfs_waitq_t ccc_unstable_waitq; /* Signaled on BRW commit */
+ atomic_t ccc_users; /* # of users (OSCs) */
+ cfs_list_t ccc_lru; /* LRU of cached clean pages */
+ spinlock_t ccc_lru_lock; /* lock for list */
+ atomic_t ccc_lru_left; /* # of LRU entries available */
+ unsigned long ccc_lru_max; /* Max # of LRU entries */
+ unsigned int ccc_lru_shrinkers; /* # of threads shrinking */
+ atomic_t ccc_unstable_nr; /* # of pages pinned */
+ wait_queue_head_t ccc_unstable_waitq; /* Signaled on BRW commit */
+};
+
+enum {
+ LUSTRE_OPC_MKDIR = 0,
+ LUSTRE_OPC_SYMLINK = 1,
+ LUSTRE_OPC_MKNOD = 2,
+ LUSTRE_OPC_CREATE = 3,
+ LUSTRE_OPC_ANY = 5
+};
+
+enum op_cli_flags {
+ CLI_SET_MEA = 1 << 0,
+ CLI_RM_ENTRY = 1 << 1,
+ CLI_HASH64 = 1 << 2,
+ CLI_API32 = 1 << 3,
+ CLI_MIGRATE = 1 << 4,
+ CLI_NEXT_ENTRY = 1 << 5,
};
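
/*
 * op_cli_flags values are independent bits and are OR-ed into a single
 * flags word; the particular combination below is hypothetical and shown
 * only to illustrate the intended use:
 *
 * \code
 * __u32 flags = CLI_HASH64 | CLI_API32;
 * \endcode
 */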
#endif /* LCLIENT_H */