/*
* super-class definitions.
*/
+#include <libcfs/libcfs.h>
#include <lu_object.h>
+
#ifdef __KERNEL__
-# include <linux/mutex.h>
-# include <linux/radix-tree.h>
+# include <linux/mutex.h>
+# include <linux/radix-tree.h>
+#else
+# include <liblustre.h>
#endif
struct inode;
* Group identifier for quota purposes.
*/
gid_t cat_gid;
+
+ /* nlink of the directory */
+ __u64 cat_nlink;
};
/**
struct cl_object_header {
/** Standard lu_object_header. cl_object::co_lu::lo_header points
* here. */
- struct lu_object_header coh_lu;
+ struct lu_object_header coh_lu;
/** \name locks
* \todo XXX move locks below to the separate cache-lines, they are
* mostly useless otherwise.
*/
/** @{ */
/** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** List of cl_lock's granted for this object. */
- cfs_list_t coh_locks;
+ spinlock_t coh_lock_guard;
+ /** @} locks */
+ /** List of cl_lock's granted for this object. */
+ struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- cfs_list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
+
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- cfs_list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
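/*
 * Illustrative sketch, not part of this patch: how a caller might walk all
 * layers of an object with cl_object_for_each().  coo_attr_get() is a real
 * cl_object_operations method; the helper itself is made up for the example.
 */
static int cl_object_attr_get_sketch(const struct lu_env *env,
				     struct cl_object *top,
				     struct cl_attr *attr)
{
	struct cl_object *obj;
	int result = 0;

	/* visit every layer top-to-bottom, stop on the first error */
	cl_object_for_each(obj, top) {
		if (obj->co_ops->coo_attr_get != NULL) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0)
				break;
		}
	}
	return result;
}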
/** @} cl_object */
#define CL_PAGE_EOF ((pgoff_t)~0ull)
};
/**
- * Flags maintained for every cl_page.
- */
-enum cl_page_flags {
- /**
- * Set when pagein completes. Used for debugging (read completes at
- * most once for a page).
- */
- CPF_READ_COMPLETED = 1 << 0
-};
-
-/**
* Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
*/
struct cl_page {
/** Reference counter. */
- atomic_t cp_ref;
+ atomic_t cp_ref;
+ /** Transfer error. */
+ int cp_error;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
- /** List of slices. Immutable after creation. */
- cfs_list_t cp_layers;
+	/** Backing VM page; \see cl_page_vmpage(). */
struct page *cp_vmpage;
+	/** Linkage of pages within group; a page must be owned (by an IO)
+	 * before it is linked into a batch. */
+ struct list_head cp_batch;
+ /** List of slices. Immutable after creation. */
+ struct list_head cp_layers;
+ /** Linkage of pages within cl_req. */
+ struct list_head cp_flight;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
*/
const enum cl_page_state cp_state;
- /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
- cfs_list_t cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
- /** Linkage of pages within cl_req. */
- cfs_list_t cp_flight;
- /** Transfer error. */
- int cp_error;
-
/**
* Page type. Only CPT_TRANSIENT is used so far. Immutable after
* creation.
*/
struct cl_io *cp_owner;
/**
- * Debug information, the task is owning the page.
- */
- struct task_struct *cp_task;
- /**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
* the top-level pages. Protected by a VM lock.
struct lu_ref_link cp_obj_ref;
/** Link to a queue, for debugging. */
struct lu_ref_link cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
};
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- cfs_list_t cpl_linkage;
+ struct list_head cpl_linkage;
};
/**
CLM_PHANTOM,
CLM_READ,
CLM_WRITE,
- CLM_GROUP
+ CLM_GROUP,
+ CLM_MAX,
};
/**
* \see cl_page_is_under_lock()
*/
int (*cpo_is_under_lock)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
+ const struct cl_page_slice *slice,
+ struct cl_io *io, pgoff_t *max);
/**
* Optional debugging helper. Prints given page slice.
} \
} while (0)
-static inline int __page_in_use(const struct cl_page *page, int refc)
+static inline struct page *cl_page_vmpage(const struct cl_page *page)
{
- if (page->cp_type == CPT_CACHEABLE)
- ++refc;
- LASSERT(atomic_read(&page->cp_ref) > 0);
- return (atomic_read(&page->cp_ref) > refc);
+ LASSERT(page->cp_vmpage != NULL);
+ return page->cp_vmpage;
}
-#define cl_page_in_use(pg) __page_in_use(pg, 1)
-#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
-static inline struct page *cl_page_vmpage(struct cl_page *page)
+/**
+ * Check if a cl_page is in use.
+ *
+ * Client cache holds a refcount, this refcount will be dropped when
+ * the page is taken out of cache, see vvp_page_delete().
+ */
+static inline bool __page_in_use(const struct cl_page *page, int refc)
{
- LASSERT(page->cp_vmpage != NULL);
- return page->cp_vmpage;
+ return (atomic_read(&page->cp_ref) > refc + 1);
}
+/**
+ * The caller itself holds a reference on the cl_page.
+ */
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+/**
+ * The caller does not hold a reference of its own.
+ */
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
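/*
 * Worked example, not part of this patch: the client cache holds one
 * reference on a cached page (see the comment above __page_in_use()), so a
 * caller that also holds its own reference sees the page as busy only when
 * somebody else references it as well:
 *
 *	cl_page_in_use(pg)       checks cp_ref > 2  (caller + cache + other)
 *	cl_page_in_use_noref(pg) checks cp_ref > 1  (cache + other)
 */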
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- cfs_list_t clc_list;
+ struct list_head clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
*/
struct cl_lock {
/** Reference counter. */
- atomic_t cll_ref;
+ atomic_t cll_ref;
/** List of slices. Immutable after creation. */
- cfs_list_t cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- cfs_list_t cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ struct list_head cll_layers;
+ /**
+ * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
+ * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
+ */
+ struct list_head cll_linkage;
+ /**
+ * Parameters of this lock. Protected by
+ * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
+ * cl_lock::cll_guard. Modified only on lock creation and in
+ * cl_lock_modify().
+ */
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
enum cl_lock_state cll_state;
*
* \see cl_lock_closure
*/
- cfs_list_t cll_inclosure;
+ struct list_head cll_inclosure;
/**
* Conflict lock at queuing time.
*/
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- cfs_list_t cls_linkage;
+ struct list_head cls_linkage;
};
/**
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- cfs_list_t pl_pages;
- struct task_struct *pl_owner;
+ unsigned pl_nr;
+ struct list_head pl_pages;
+ struct task_struct *pl_owner;
};
/**
* \see vvp_io, lov_io, osc_io, ccc_io
*/
struct cl_io_slice {
- struct cl_io *cis_io;
- /** corresponding object slice. Immutable after creation. */
- struct cl_object *cis_obj;
- /** io operations. Immutable after creation. */
- const struct cl_io_operations *cis_iop;
- /**
- * linkage into a list of all slices for a given cl_io, hanging off
- * cl_io::ci_layers. Immutable after creation.
- */
- cfs_list_t cis_linkage;
+ struct cl_io *cis_io;
+ /** corresponding object slice. Immutable after creation. */
+ struct cl_object *cis_obj;
+ /** io operations. Immutable after creation. */
+ const struct cl_io_operations *cis_iop;
+ /**
+ * linkage into a list of all slices for a given cl_io, hanging off
+ * cl_io::ci_layers. Immutable after creation.
+ */
+ struct list_head cis_linkage;
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
* same lock can be part of multiple io's simultaneously.
*/
struct cl_io_lock_link {
- /** linkage into one of cl_lockset lists. */
- cfs_list_t cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
- /** optional destructor */
- void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ /** linkage into one of cl_lockset lists. */
+ struct list_head cill_linkage;
+ struct cl_lock_descr cill_descr;
+ struct cl_lock *cill_lock;
+ /** optional destructor */
+ void (*cill_fini)(const struct lu_env *env,
+ struct cl_io_lock_link *link);
};
/**
* enqueued.
*/
struct cl_lockset {
- /** locks to be acquired. */
- cfs_list_t cls_todo;
- /** locks currently being processed. */
- cfs_list_t cls_curr;
- /** locks acquired. */
- cfs_list_t cls_done;
+ /** locks to be acquired. */
+ struct list_head cls_todo;
+ /** locks currently being processed. */
+ struct list_head cls_curr;
+ /** locks acquired. */
+ struct list_head cls_done;
};
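/*
 * Illustrative sketch, not part of this patch: requesting a whole-file read
 * lock through an io's lockset.  cl_io_lock_alloc_add() is the real helper
 * that allocates a cl_io_lock_link for the descriptor and queues it on
 * cl_lockset::cls_todo; the wrapper function itself is made up for the
 * example.
 */
static int cl_io_lock_whole_file_sketch(const struct lu_env *env,
					struct cl_io *io,
					struct cl_object *obj)
{
	struct cl_lock_descr descr = {
		.cld_obj   = obj,
		.cld_mode  = CLM_READ,
		.cld_start = 0,
		.cld_end   = CL_PAGE_EOF,
	};

	/* the link ends up on io->ci_lockset.cls_todo until it is enqueued */
	return cl_io_lock_alloc_add(env, io, &descr);
}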
/**
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- cfs_list_t ci_layers;
+ struct list_head ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transfered */
- cfs_list_t crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- cfs_list_t crq_layers;
+ enum cl_req_type crq_type;
+	/** A list of pages being transferred */
+ struct list_head crq_pages;
+ /** Number of pages in cl_req::crq_pages */
+ unsigned crq_nrpages;
+	/** An array of objects whose pages are in ->crq_pages */
+	struct cl_req_obj	*crq_o;
+	/** Number of elements in cl_req::crq_o[] */
+ unsigned crq_nrobjs;
+ struct list_head crq_layers;
};
/**
* Per-layer state for request.
*/
struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- cfs_list_t crs_linkage;
- const struct cl_req_operations *crs_ops;
+ struct cl_req *crs_req;
+ struct cl_device *crs_dev;
+ struct list_head crs_linkage;
+ const struct cl_req_operations *crs_ops;
};
/* @} cl_req */
return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
}
-static inline struct cl_device *cl_object_device(const struct cl_object *o)
-{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
- return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
-}
-
static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
{
return container_of0(h, struct cl_object_header, coh_lu);
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops);
+ struct cl_object *obj, pgoff_t index,
+ const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
const struct cl_lock_operations *ops);
{
clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
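+	/* A cl_page and the page slices of all layers are allocated together
+	 * as a single coh_page_bufsize-sized buffer; the 512-byte ceiling
+	 * below is a sanity check on how much the layers may reserve. */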
+ WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
}
static inline void *cl_object_page_slice(struct cl_object *clob,
void cl_page_export (const struct lu_env *env,
struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
+ struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
int cl_page_size (const struct cl_object *obj);
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const cfs_list_t *queue,
- const struct cl_lock_descr *need);
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
*/
static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
{
- LASSERT(plist->pl_nr > 0);
- return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
}
static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
{
LASSERT(plist->pl_nr > 0);
- return cfs_list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
}
/**
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
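/*
 * Illustrative sketch, not part of this patch: draining a page list with the
 * _safe variant, which is required because pages are unlinked while
 * iterating.  cl_page_discard() and cl_page_list_del() are real interfaces;
 * the helper itself is made up, and it assumes the io owns every page on the
 * list.
 */
static void cl_page_list_discard_sketch(const struct lu_env *env,
					struct cl_io *io,
					struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	cl_page_list_for_each_safe(page, temp, plist) {
		/* discard while the list still holds its reference, then
		 * drop the page from the list */
		cl_page_discard(env, io, page);
		cl_page_list_del(env, plist, page);
	}
}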
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);