* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
*/
/** @{ */
/** Lock protecting page tree. */
- spinlock_t coh_page_guard;
+ cfs_spinlock_t coh_page_guard;
/** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
+ cfs_spinlock_t coh_lock_guard;
/** @} locks */
/** Radix tree of cl_page's, cached for this object. */
struct radix_tree_root coh_tree;
/** # of pages in radix tree. */
unsigned long coh_pages;
/** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
+ cfs_list_t coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
*
* \todo XXX this can be read/write lock if needed.
*/
- spinlock_t coh_attr_guard;
+ cfs_spinlock_t coh_attr_guard;
/**
* Number of objects above this one: 0 for a top-object, 1 for its
* sub-object, etc.
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ cfs_list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ cfs_list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
/** @} cl_object */
#ifndef pgoff_t
*/
struct cl_page {
/** Reference counter. */
- atomic_t cp_ref;
+ cfs_atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** Logical page index within the object. Immutable after creation. */
pgoff_t cp_index;
/** List of slices. Immutable after creation. */
- struct list_head cp_layers;
+ cfs_list_t cp_layers;
/** Parent page, NULL for top-level page. Immutable after creation. */
struct cl_page *cp_parent;
/** Lower-layer page. NULL for bottommost page. Immutable after
/**
* Linkage of pages within some group. Protected by
* cl_page::cp_mutex. */
- struct list_head cp_batch;
+ cfs_list_t cp_batch;
/** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
+ cfs_mutex_t cp_mutex;
/** Linkage of pages within cl_req. */
- struct list_head cp_flight;
+ cfs_list_t cp_flight;
/** Transfer error. */
int cp_error;
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- struct list_head cpl_linkage;
+ cfs_list_t cpl_linkage;
};
/**
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
cl_page_print(env, &__info, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
cl_page_header_print(env, &__info, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- struct list_head clc_list;
+ cfs_list_t clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
*/
struct cl_lock {
/** Reference counter. */
- atomic_t cll_ref;
+ cfs_atomic_t cll_ref;
/** List of slices. Immutable after creation. */
- struct list_head cll_layers;
+ cfs_list_t cll_layers;
/**
* Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
* by cl_lock::cll_descr::cld_obj::coh_lock_guard.
*/
- struct list_head cll_linkage;
+ cfs_list_t cll_linkage;
/**
* Parameters of this lock. Protected by
* cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
*
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
- struct mutex cll_guard;
+ cfs_mutex_t cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
*
* \see cl_lock_closure
*/
- struct list_head cll_inclosure;
+ cfs_list_t cll_inclosure;
+ /**
+ * Conflicting lock at queuing time.
+ */
+ struct cl_lock *cll_conflict;
/**
* A list of references to this lock, for debugging.
*/
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- struct list_head cls_linkage;
+ cfs_list_t cls_linkage;
};
/**
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
cl_lock_print(env, &__info, lu_cdebug_printer, lock); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- struct list_head pl_pages;
- cfs_task_t *pl_owner;
+ unsigned pl_nr;
+ cfs_list_t pl_pages;
+ cfs_task_t *pl_owner;
};
/**
CIT_READ,
/** write system call */
CIT_WRITE,
- /** truncate system call */
- CIT_TRUNC,
+ /** truncate, utime system calls */
+ CIT_SETATTR,
/**
* page fault handling
*/
* linkage into a list of all slices for a given cl_io, hanging off
* cl_io::ci_layers. Immutable after creation.
*/
- struct list_head cis_linkage;
+ cfs_list_t cis_linkage;
};
*/
struct cl_io_lock_link {
/** linkage into one of cl_lockset lists. */
- struct list_head cill_linkage;
+ cfs_list_t cill_linkage;
struct cl_lock_descr cill_descr;
struct cl_lock *cill_lock;
/** optional destructor */
*/
struct cl_lockset {
/** locks to be acquired. */
- struct list_head cls_todo;
+ cfs_list_t cls_todo;
/** locks currently being processed. */
- struct list_head cls_curr;
+ cfs_list_t cls_curr;
/** locks acquired. */
- struct list_head cls_done;
+ cfs_list_t cls_done;
};
/**
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- struct list_head ci_layers;
+ cfs_list_t ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
int wr_append;
} ci_wr;
struct cl_io_rw_common ci_rw;
- struct cl_truncate_io {
- /** new size to which file is truncated */
- size_t tr_size;
- struct obd_capa *tr_capa;
- } ci_truncate;
+ struct cl_setattr_io {
+ struct ost_lvb sa_attr;
+ unsigned int sa_valid;
+ struct obd_capa *sa_capa;
+ } ci_setattr;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
+ enum cl_req_type crq_type;
/** A list of pages being transfered */
- struct list_head crq_pages;
+ cfs_list_t crq_pages;
/** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
+ unsigned crq_nrpages;
/** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
+ struct cl_req_obj *crq_o;
/** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
+ unsigned crq_nrobjs;
+ cfs_list_t crq_layers;
};
/**
struct cl_req_slice {
struct cl_req *crs_req;
struct cl_device *crs_dev;
- struct list_head crs_linkage;
+ cfs_list_t crs_linkage;
const struct cl_req_operations *crs_ops;
};
struct cache_stats {
const char *cs_name;
/** how many entities were created at all */
- atomic_t cs_created;
+ cfs_atomic_t cs_created;
/** how many cache lookups were performed */
- atomic_t cs_lookup;
+ cfs_atomic_t cs_lookup;
/** how many times cache lookup resulted in a hit */
- atomic_t cs_hit;
+ cfs_atomic_t cs_hit;
/** how many entities are in the cache right now */
- atomic_t cs_total;
+ cfs_atomic_t cs_total;
/** how many entities in the cache are actively used (and cannot be
* evicted) right now */
- atomic_t cs_busy;
+ cfs_atomic_t cs_busy;
};
/** These are not exported so far */
*/
struct cache_stats cs_pages;
struct cache_stats cs_locks;
- atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
+ cfs_atomic_t cs_pages_state[CPS_NR];
+ cfs_atomic_t cs_locks_state[CLS_NR];
};
int cl_site_init (struct cl_site *s, struct cl_device *top);
struct cl_io *io,
pgoff_t start, pgoff_t end,
struct cl_page_list *plist,
- int nonblock);
+ int nonblock,
+ int *resched);
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
+struct cl_page *cl_page_find_sub (const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ struct cl_page *parent);
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
struct cl_page *page);
struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
-int cl_is_page (const void *addr);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
const char *scope, const void *source);
void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_compatible(const struct cl_lock *lock1,
- const struct cl_lock *lock2);
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
struct cl_lock *lock);
-
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-
int cl_lock_is_intransit(struct cl_lock *lock);
+int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
+ int keep_mutex);
+
/** \name statemachine statemachine
* Interface to lock state machine consists of 3 parts:
*
int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
+
/** @} statemachine */
void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const struct list_head *queue,
+int cl_queue_match (const cfs_list_t *queue,
const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-int cl_is_lock (const void *addr);
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
}
+/**
+ * True, iff \a io is a truncate(2).
+ */
+static inline int cl_io_is_trunc(const struct cl_io *io)
+{
+ return io->ci_type == CIT_SETATTR &&
+ (io->u.ci_setattr.sa_valid & ATTR_SIZE);
+}
+
struct cl_io *cl_io_top(struct cl_io *io);
void cl_io_print(const struct lu_env *env, void *cookie,
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
*/
struct cl_sync_io {
/** number of pages yet to be transferred. */
- atomic_t csi_sync_nr;
+ cfs_atomic_t csi_sync_nr;
/** completion to be signaled when transfer is complete. */
cfs_waitq_t csi_waitq;
/** error code. */
- int csi_sync_rc;
+ int csi_sync_rc;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);